diff --git a/README.md b/README.md index 6355d28b0..120cece91 100755 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ yuzu emulator early access ============= -This is the source code for early-access 3035. +This is the source code for early-access 3038. ## Legal Notice diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index e7fe675cb..abeb5859b 100755 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -190,9 +190,6 @@ add_library(core STATIC hle/kernel/k_code_memory.h hle/kernel/k_condition_variable.cpp hle/kernel/k_condition_variable.h - hle/kernel/k_dynamic_page_manager.h - hle/kernel/k_dynamic_resource_manager.h - hle/kernel/k_dynamic_slab_heap.h hle/kernel/k_event.cpp hle/kernel/k_event.h hle/kernel/k_handle_table.cpp diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp index 29ba562dc..953d96439 100755 --- a/src/core/arm/arm_interface.cpp +++ b/src/core/arm/arm_interface.cpp @@ -134,14 +134,6 @@ void ARM_Interface::Run() { } system.ExitDynarmicProfile(); - // If the thread is scheduled for termination, exit the thread. - if (current_thread->HasDpc()) { - if (current_thread->IsTerminationRequested()) { - current_thread->Exit(); - UNREACHABLE(); - } - } - // Notify the debugger and go to sleep if a breakpoint was hit, // or if the thread is unable to continue for any reason. if (Has(hr, breakpoint) || Has(hr, no_execute)) { diff --git a/src/core/core.cpp b/src/core/core.cpp index 622a20510..1deeee154 100755 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -133,48 +133,6 @@ struct System::Impl { : kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{}, cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {} - void Initialize(System& system) { - device_memory = std::make_unique(); - - is_multicore = Settings::values.use_multi_core.GetValue(); - - core_timing.SetMulticore(is_multicore); - core_timing.Initialize([&system]() { system.RegisterHostThread(); }); - - const auto posix_time = std::chrono::system_clock::now().time_since_epoch(); - const auto current_time = - std::chrono::duration_cast(posix_time).count(); - Settings::values.custom_rtc_differential = - Settings::values.custom_rtc.value_or(current_time) - current_time; - - // Create a default fs if one doesn't already exist. - if (virtual_filesystem == nullptr) - virtual_filesystem = std::make_shared(); - if (content_provider == nullptr) - content_provider = std::make_unique(); - - // Create default implementations of applets if one is not provided. 
- applet_manager.SetDefaultAppletsIfMissing(); - - is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue(); - - kernel.SetMulticore(is_multicore); - cpu_manager.SetMulticore(is_multicore); - cpu_manager.SetAsyncGpu(is_async_gpu); - } - - void ReinitializeIfNecessary(System& system) { - if (is_multicore == Settings::values.use_multi_core.GetValue()) { - return; - } - - LOG_DEBUG(Kernel, "Re-initializing"); - - is_multicore = Settings::values.use_multi_core.GetValue(); - - Initialize(system); - } - SystemResultStatus Run() { std::unique_lock lk(suspend_guard); status = SystemResultStatus::Success; @@ -220,14 +178,37 @@ struct System::Impl { debugger = std::make_unique(system, port); } - SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) { + SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) { LOG_DEBUG(Core, "initialized OK"); - // Setting changes may require a full system reinitialization (e.g., disabling multicore). - ReinitializeIfNecessary(system); + device_memory = std::make_unique(); + + is_multicore = Settings::values.use_multi_core.GetValue(); + is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue(); + + kernel.SetMulticore(is_multicore); + cpu_manager.SetMulticore(is_multicore); + cpu_manager.SetAsyncGpu(is_async_gpu); + core_timing.SetMulticore(is_multicore); kernel.Initialize(); cpu_manager.Initialize(); + core_timing.Initialize([&system]() { system.RegisterHostThread(); }); + + const auto posix_time = std::chrono::system_clock::now().time_since_epoch(); + const auto current_time = + std::chrono::duration_cast(posix_time).count(); + Settings::values.custom_rtc_differential = + Settings::values.custom_rtc.value_or(current_time) - current_time; + + // Create a default fs if one doesn't already exist. + if (virtual_filesystem == nullptr) + virtual_filesystem = std::make_shared(); + if (content_provider == nullptr) + content_provider = std::make_unique(); + + /// Create default implementations of applets if one is not provided. 
+ applet_manager.SetDefaultAppletsIfMissing(); /// Reset all glue registrations arp_manager.ResetAll(); @@ -272,11 +253,11 @@ struct System::Impl { return SystemResultStatus::ErrorGetLoader; } - SystemResultStatus init_result{SetupForMainProcess(system, emu_window)}; + SystemResultStatus init_result{Init(system, emu_window)}; if (init_result != SystemResultStatus::Success) { LOG_CRITICAL(Core, "Failed to initialize system (Error {})!", static_cast(init_result)); - ShutdownMainProcess(); + Shutdown(); return init_result; } @@ -295,7 +276,7 @@ struct System::Impl { const auto [load_result, load_parameters] = app_loader->Load(*main_process, system); if (load_result != Loader::ResultStatus::Success) { LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result); - ShutdownMainProcess(); + Shutdown(); return static_cast( static_cast(SystemResultStatus::ErrorLoader) + static_cast(load_result)); @@ -354,7 +335,7 @@ struct System::Impl { return status; } - void ShutdownMainProcess() { + void Shutdown() { SetShuttingDown(true); // Log last frame performance stats if game was loded @@ -388,7 +369,7 @@ struct System::Impl { cheat_engine.reset(); telemetry_session.reset(); time_manager.Shutdown(); - core_timing.ClearPendingEvents(); + core_timing.Shutdown(); app_loader.reset(); audio_core.reset(); gpu_core.reset(); @@ -396,6 +377,7 @@ struct System::Impl { perf_stats.reset(); kernel.Shutdown(); memory.Reset(); + applet_manager.ClearAll(); if (auto room_member = room_network.GetRoomMember().lock()) { Network::GameInfo game_info{}; @@ -538,10 +520,6 @@ const CpuManager& System::GetCpuManager() const { return impl->cpu_manager; } -void System::Initialize() { - impl->Initialize(*this); -} - SystemResultStatus System::Run() { return impl->Run(); } @@ -562,8 +540,8 @@ void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) { impl->kernel.InvalidateCpuInstructionCacheRange(addr, size); } -void System::ShutdownMainProcess() { - impl->ShutdownMainProcess(); +void System::Shutdown() { + impl->Shutdown(); } bool System::IsShuttingDown() const { diff --git a/src/core/core.h b/src/core/core.h index 4ebedffd9..7843cc8ad 100755 --- a/src/core/core.h +++ b/src/core/core.h @@ -142,12 +142,6 @@ public: System(System&&) = delete; System& operator=(System&&) = delete; - /** - * Initializes the system - * This function will initialize core functionaility used for system emulation - */ - void Initialize(); - /** * Run the OS and Application * This function will start emulation and run the relevant devices @@ -172,8 +166,8 @@ public: void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size); - /// Shutdown the main emulated process. - void ShutdownMainProcess(); + /// Shutdown the emulated system. + void Shutdown(); /// Check if the core is shutting down. 
[[nodiscard]] bool IsShuttingDown() const; diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp index 0e7b5f943..2678ce532 100755 --- a/src/core/core_timing.cpp +++ b/src/core/core_timing.cpp @@ -40,9 +40,7 @@ struct CoreTiming::Event { CoreTiming::CoreTiming() : clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {} -CoreTiming::~CoreTiming() { - Reset(); -} +CoreTiming::~CoreTiming() = default; void CoreTiming::ThreadEntry(CoreTiming& instance) { constexpr char name[] = "HostTiming"; @@ -55,7 +53,6 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) { } void CoreTiming::Initialize(std::function&& on_thread_init_) { - Reset(); on_thread_init = std::move(on_thread_init_); event_fifo_id = 0; shutting_down = false; @@ -68,8 +65,17 @@ void CoreTiming::Initialize(std::function&& on_thread_init_) { } } -void CoreTiming::ClearPendingEvents() { - event_queue.clear(); +void CoreTiming::Shutdown() { + paused = true; + shutting_down = true; + pause_event.Set(); + event.Set(); + if (timer_thread) { + timer_thread->join(); + } + ClearPendingEvents(); + timer_thread.reset(); + has_started = false; } void CoreTiming::Pause(bool is_paused) { @@ -190,6 +196,10 @@ u64 CoreTiming::GetClockTicks() const { return CpuCyclesToClockCycles(ticks); } +void CoreTiming::ClearPendingEvents() { + event_queue.clear(); +} + void CoreTiming::RemoveEvent(const std::shared_ptr& event_type) { std::scoped_lock lock{basic_lock}; @@ -297,18 +307,6 @@ void CoreTiming::ThreadLoop() { } } -void CoreTiming::Reset() { - paused = true; - shutting_down = true; - pause_event.Set(); - event.Set(); - if (timer_thread) { - timer_thread->join(); - } - timer_thread.reset(); - has_started = false; -} - std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const { if (is_multicore) { return clock->GetTimeNS(); diff --git a/src/core/core_timing.h b/src/core/core_timing.h index b5925193c..3259397b2 100755 --- a/src/core/core_timing.h +++ b/src/core/core_timing.h @@ -61,14 +61,19 @@ public: /// required to end slice - 1 and start slice 0 before the first cycle of code is executed. void Initialize(std::function&& on_thread_init_); - /// Clear all pending events. This should ONLY be done on exit. - void ClearPendingEvents(); + /// Tears down all timing related functionality. + void Shutdown(); /// Sets if emulation is multicore or single core, must be set before Initialize void SetMulticore(bool is_multicore_) { is_multicore = is_multicore_; } + /// Check if it's using host timing. + bool IsHostTiming() const { + return is_multicore; + } + /// Pauses/Unpauses the execution of the timer thread. void Pause(bool is_paused); @@ -131,11 +136,12 @@ public: private: struct Event; + /// Clear all pending events. This should ONLY be done on exit. 
+ void ClearPendingEvents(); + static void ThreadEntry(CoreTiming& instance); void ThreadLoop(); - void Reset(); - std::unique_ptr clock; s64 global_timer = 0; diff --git a/src/core/device_memory.h b/src/core/device_memory.h index 90510733c..df61b0c0b 100755 --- a/src/core/device_memory.h +++ b/src/core/device_memory.h @@ -31,14 +31,12 @@ public: DramMemoryMap::Base; } - template - T* GetPointer(PAddr addr) { - return reinterpret_cast(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base)); + u8* GetPointer(PAddr addr) { + return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base); } - template - const T* GetPointer(PAddr addr) const { - return reinterpret_cast(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base)); + const u8* GetPointer(PAddr addr) const { + return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base); } Common::HostMemory buffer; diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp index c84d36c8c..9b6b284d0 100755 --- a/src/core/hle/kernel/init/init_slab_setup.cpp +++ b/src/core/hle/kernel/init/init_slab_setup.cpp @@ -94,8 +94,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd // TODO(bunnei): Fix this once we support the kernel virtual memory layout. if (size > 0) { - void* backing_kernel_memory{system.DeviceMemory().GetPointer( - TranslateSlabAddrToPhysical(memory_layout, start))}; + void* backing_kernel_memory{ + system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))}; const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1); ASSERT(region != nullptr); @@ -181,7 +181,7 @@ void InitializeKPageBufferSlabHeap(Core::System& system) { ASSERT(slab_address != 0); // Initialize the slabheap. - KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address), + KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address), slab_size); } diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp index 4b1c134d4..da57ceb21 100755 --- a/src/core/hle/kernel/k_code_memory.cpp +++ b/src/core/hle/kernel/k_code_memory.cpp @@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si // Clear the memory. for (const auto& block : m_page_group.Nodes()) { - std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize()); + std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize()); } // Set remaining tracking members. diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp index 4a6b60d26..1b577a5b3 100755 --- a/src/core/hle/kernel/k_interrupt_manager.cpp +++ b/src/core/hle/kernel/k_interrupt_manager.cpp @@ -11,34 +11,29 @@ namespace Kernel::KInterruptManager { void HandleInterrupt(KernelCore& kernel, s32 core_id) { + auto* process = kernel.CurrentProcess(); + if (!process) { + return; + } + // Acknowledge the interrupt. kernel.PhysicalCore(core_id).ClearInterrupt(); auto& current_thread = GetCurrentThread(kernel); - if (auto* process = kernel.CurrentProcess(); process) { - // If the user disable count is set, we may need to pin the current thread. - if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) { - KScopedSchedulerLock sl{kernel}; + // If the user disable count is set, we may need to pin the current thread. 
+ if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) { + KScopedSchedulerLock sl{kernel}; - // Pin the current thread. - process->PinCurrentThread(core_id); + // Pin the current thread. + process->PinCurrentThread(core_id); - // Set the interrupt flag for the thread. - GetCurrentThread(kernel).SetInterruptFlag(); - } + // Set the interrupt flag for the thread. + GetCurrentThread(kernel).SetInterruptFlag(); } // Request interrupt scheduling. kernel.CurrentScheduler()->RequestScheduleOnInterrupt(); } -void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) { - for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) { - if (core_mask & (1ULL << core_id)) { - kernel.PhysicalCore(core_id).Interrupt(); - } - } -} - } // namespace Kernel::KInterruptManager diff --git a/src/core/hle/kernel/k_interrupt_manager.h b/src/core/hle/kernel/k_interrupt_manager.h index 803dc9211..f103dfe3f 100755 --- a/src/core/hle/kernel/k_interrupt_manager.h +++ b/src/core/hle/kernel/k_interrupt_manager.h @@ -11,8 +11,6 @@ class KernelCore; namespace KInterruptManager { void HandleInterrupt(KernelCore& kernel, s32 core_id); -void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask); - -} // namespace KInterruptManager +} } // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h index 9444f6bd2..18df1f836 100755 --- a/src/core/hle/kernel/k_memory_block.h +++ b/src/core/hle/kernel/k_memory_block.h @@ -6,7 +6,6 @@ #include "common/alignment.h" #include "common/assert.h" #include "common/common_types.h" -#include "common/intrusive_red_black_tree.h" #include "core/hle/kernel/memory_types.h" #include "core/hle/kernel/svc_types.h" @@ -169,8 +168,9 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per enum class KMemoryAttribute : u8 { None = 0x00, - All = 0xFF, - UserMask = All, + Mask = 0x7F, + All = Mask, + DontCareMask = 0x80, Locked = static_cast(Svc::MemoryAttribute::Locked), IpcLocked = static_cast(Svc::MemoryAttribute::IpcLocked), @@ -178,112 +178,76 @@ enum class KMemoryAttribute : u8 { Uncached = static_cast(Svc::MemoryAttribute::Uncached), SetMask = Uncached, + + IpcAndDeviceMapped = IpcLocked | DeviceShared, + LockedAndIpcLocked = Locked | IpcLocked, + DeviceSharedAndUncached = DeviceShared | Uncached }; DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute); -enum class KMemoryBlockDisableMergeAttribute : u8 { - None = 0, - Normal = (1u << 0), - DeviceLeft = (1u << 1), - IpcLeft = (1u << 2), - Locked = (1u << 3), - DeviceRight = (1u << 4), - - AllLeft = Normal | DeviceLeft | IpcLeft | Locked, - AllRight = DeviceRight, -}; -DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute); +static_assert((static_cast(KMemoryAttribute::Mask) & + static_cast(KMemoryAttribute::DontCareMask)) == 0); struct KMemoryInfo { - uintptr_t m_address; - size_t m_size; - KMemoryState m_state; - u16 m_device_disable_merge_left_count; - u16 m_device_disable_merge_right_count; - u16 m_ipc_lock_count; - u16 m_device_use_count; - u16 m_ipc_disable_merge_count; - KMemoryPermission m_permission; - KMemoryAttribute m_attribute; - KMemoryPermission m_original_permission; - KMemoryBlockDisableMergeAttribute m_disable_merge_attribute; + VAddr addr{}; + std::size_t size{}; + KMemoryState state{}; + KMemoryPermission perm{}; + KMemoryAttribute attribute{}; + KMemoryPermission original_perm{}; + u16 ipc_lock_count{}; + u16 device_use_count{}; constexpr Svc::MemoryInfo GetSvcMemoryInfo() const { 
return { - .addr = m_address, - .size = m_size, - .state = static_cast(m_state & KMemoryState::Mask), - .attr = static_cast(m_attribute & KMemoryAttribute::UserMask), - .perm = static_cast(m_permission & KMemoryPermission::UserMask), - .ipc_refcount = m_ipc_lock_count, - .device_refcount = m_device_use_count, - .padding = {}, + addr, + size, + static_cast(state & KMemoryState::Mask), + static_cast(attribute & KMemoryAttribute::Mask), + static_cast(perm & KMemoryPermission::UserMask), + ipc_lock_count, + device_use_count, }; } - constexpr uintptr_t GetAddress() const { - return m_address; + constexpr VAddr GetAddress() const { + return addr; } - - constexpr size_t GetSize() const { - return m_size; + constexpr std::size_t GetSize() const { + return size; } - - constexpr size_t GetNumPages() const { - return this->GetSize() / PageSize; + constexpr std::size_t GetNumPages() const { + return GetSize() / PageSize; } - - constexpr uintptr_t GetEndAddress() const { - return this->GetAddress() + this->GetSize(); + constexpr VAddr GetEndAddress() const { + return GetAddress() + GetSize(); } - - constexpr uintptr_t GetLastAddress() const { - return this->GetEndAddress() - 1; + constexpr VAddr GetLastAddress() const { + return GetEndAddress() - 1; } - - constexpr u16 GetIpcLockCount() const { - return m_ipc_lock_count; - } - - constexpr u16 GetIpcDisableMergeCount() const { - return m_ipc_disable_merge_count; - } - constexpr KMemoryState GetState() const { - return m_state; + return state; } - - constexpr KMemoryPermission GetPermission() const { - return m_permission; - } - - constexpr KMemoryPermission GetOriginalPermission() const { - return m_original_permission; - } - constexpr KMemoryAttribute GetAttribute() const { - return m_attribute; + return attribute; } - - constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const { - return m_disable_merge_attribute; + constexpr KMemoryPermission GetPermission() const { + return perm; } }; -class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode { +class KMemoryBlock final { + friend class KMemoryBlockManager; + private: - u16 m_device_disable_merge_left_count; - u16 m_device_disable_merge_right_count; - VAddr m_address; - size_t m_num_pages; - KMemoryState m_memory_state; - u16 m_ipc_lock_count; - u16 m_device_use_count; - u16 m_ipc_disable_merge_count; - KMemoryPermission m_permission; - KMemoryPermission m_original_permission; - KMemoryAttribute m_attribute; - KMemoryBlockDisableMergeAttribute m_disable_merge_attribute; + VAddr addr{}; + std::size_t num_pages{}; + KMemoryState state{KMemoryState::None}; + u16 ipc_lock_count{}; + u16 device_use_count{}; + KMemoryPermission perm{KMemoryPermission::None}; + KMemoryPermission original_perm{KMemoryPermission::None}; + KMemoryAttribute attribute{KMemoryAttribute::None}; public: static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) { @@ -297,349 +261,113 @@ public: } public: + constexpr KMemoryBlock() = default; + constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_, + KMemoryPermission perm_, KMemoryAttribute attribute_) + : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {} + constexpr VAddr GetAddress() const { - return m_address; + return addr; } - constexpr size_t GetNumPages() const { - return m_num_pages; + constexpr std::size_t GetNumPages() const { + return num_pages; } - constexpr size_t GetSize() const { - return this->GetNumPages() * PageSize; + constexpr std::size_t GetSize() 
const { + return GetNumPages() * PageSize; } constexpr VAddr GetEndAddress() const { - return this->GetAddress() + this->GetSize(); + return GetAddress() + GetSize(); } constexpr VAddr GetLastAddress() const { - return this->GetEndAddress() - 1; - } - - constexpr u16 GetIpcLockCount() const { - return m_ipc_lock_count; - } - - constexpr u16 GetIpcDisableMergeCount() const { - return m_ipc_disable_merge_count; - } - - constexpr KMemoryPermission GetPermission() const { - return m_permission; - } - - constexpr KMemoryPermission GetOriginalPermission() const { - return m_original_permission; - } - - constexpr KMemoryAttribute GetAttribute() const { - return m_attribute; + return GetEndAddress() - 1; } constexpr KMemoryInfo GetMemoryInfo() const { return { - .m_address = this->GetAddress(), - .m_size = this->GetSize(), - .m_state = m_memory_state, - .m_device_disable_merge_left_count = m_device_disable_merge_left_count, - .m_device_disable_merge_right_count = m_device_disable_merge_right_count, - .m_ipc_lock_count = m_ipc_lock_count, - .m_device_use_count = m_device_use_count, - .m_ipc_disable_merge_count = m_ipc_disable_merge_count, - .m_permission = m_permission, - .m_attribute = m_attribute, - .m_original_permission = m_original_permission, - .m_disable_merge_attribute = m_disable_merge_attribute, + GetAddress(), GetSize(), state, perm, + attribute, original_perm, ipc_lock_count, device_use_count, }; } -public: - explicit KMemoryBlock() = default; - - constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p, - KMemoryAttribute attr) - : Common::IntrusiveRedBlackTreeBaseNode(), - m_device_disable_merge_left_count(), m_device_disable_merge_right_count(), - m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0), - m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p), - m_original_permission(KMemoryPermission::None), m_attribute(attr), - m_disable_merge_attribute() {} - - constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p, - KMemoryAttribute attr) { - m_device_disable_merge_left_count = 0; - m_device_disable_merge_right_count = 0; - m_address = addr; - m_num_pages = np; - m_memory_state = ms; - m_ipc_lock_count = 0; - m_device_use_count = 0; - m_permission = p; - m_original_permission = KMemoryPermission::None; - m_attribute = attr; - m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None; + void ShareToDevice(KMemoryPermission /*new_perm*/) { + ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || + device_use_count == 0); + attribute |= KMemoryAttribute::DeviceShared; + const u16 new_use_count{++device_use_count}; + ASSERT(new_use_count > 0); } + void UnshareToDevice(KMemoryPermission /*new_perm*/) { + ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); + const u16 prev_use_count{device_use_count--}; + ASSERT(prev_use_count > 0); + if (prev_use_count == 1) { + attribute &= ~KMemoryAttribute::DeviceShared; + } + } + +private: constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const { - constexpr auto AttributeIgnoreMask = - KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; - return m_memory_state == s && m_permission == p && - (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); + constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask | + KMemoryAttribute::IpcLocked | + KMemoryAttribute::DeviceShared}; + return state == s && perm == p 
&& + (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); } constexpr bool HasSameProperties(const KMemoryBlock& rhs) const { - return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission && - m_original_permission == rhs.m_original_permission && - m_attribute == rhs.m_attribute && m_ipc_lock_count == rhs.m_ipc_lock_count && - m_device_use_count == rhs.m_device_use_count; + return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm && + attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count && + device_use_count == rhs.device_use_count; } - constexpr bool CanMergeWith(const KMemoryBlock& rhs) const { - return this->HasSameProperties(rhs) && - (m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) == - KMemoryBlockDisableMergeAttribute::None && - (rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) == - KMemoryBlockDisableMergeAttribute::None; + constexpr bool Contains(VAddr start) const { + return GetAddress() <= start && start <= GetEndAddress(); } - constexpr bool Contains(VAddr addr) const { - return this->GetAddress() <= addr && addr <= this->GetEndAddress(); + constexpr void Add(std::size_t count) { + ASSERT(count > 0); + ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1); + + num_pages += count; } - constexpr void Add(const KMemoryBlock& added_block) { - ASSERT(added_block.GetNumPages() > 0); - ASSERT(this->GetAddress() + added_block.GetSize() - 1 < - this->GetEndAddress() + added_block.GetSize() - 1); + constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm, + KMemoryAttribute new_attribute) { + ASSERT(original_perm == KMemoryPermission::None); + ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None); - m_num_pages += added_block.GetNumPages(); - m_disable_merge_attribute = static_cast( - m_disable_merge_attribute | added_block.m_disable_merge_attribute); - m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count; + state = new_state; + perm = new_perm; + + attribute = static_cast( + new_attribute | + (attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared))); } - constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a, - bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) { - ASSERT(m_original_permission == KMemoryPermission::None); - ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None); + constexpr KMemoryBlock Split(VAddr split_addr) { + ASSERT(GetAddress() < split_addr); + ASSERT(Contains(split_addr)); + ASSERT(Common::IsAligned(split_addr, PageSize)); - m_memory_state = s; - m_permission = p; - m_attribute = static_cast( - a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared))); + KMemoryBlock block; + block.addr = addr; + block.num_pages = (split_addr - GetAddress()) / PageSize; + block.state = state; + block.ipc_lock_count = ipc_lock_count; + block.device_use_count = device_use_count; + block.perm = perm; + block.original_perm = original_perm; + block.attribute = attribute; - if (set_disable_merge_attr && set_mask != 0) { - m_disable_merge_attribute = m_disable_merge_attribute | - static_cast(set_mask); - } - if (clear_mask != 0) { - m_disable_merge_attribute = m_disable_merge_attribute & - static_cast(~clear_mask); - } - } + addr = split_addr; + num_pages -= block.num_pages; - constexpr void Split(KMemoryBlock* block, VAddr addr) { - ASSERT(this->GetAddress() < addr); - 
ASSERT(this->Contains(addr)); - ASSERT(Common::IsAligned(addr, PageSize)); - - block->m_address = m_address; - block->m_num_pages = (addr - this->GetAddress()) / PageSize; - block->m_memory_state = m_memory_state; - block->m_ipc_lock_count = m_ipc_lock_count; - block->m_device_use_count = m_device_use_count; - block->m_permission = m_permission; - block->m_original_permission = m_original_permission; - block->m_attribute = m_attribute; - block->m_disable_merge_attribute = static_cast( - m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft); - block->m_ipc_disable_merge_count = m_ipc_disable_merge_count; - block->m_device_disable_merge_left_count = m_device_disable_merge_left_count; - block->m_device_disable_merge_right_count = 0; - - m_address = addr; - m_num_pages -= block->m_num_pages; - - m_ipc_disable_merge_count = 0; - m_device_disable_merge_left_count = 0; - m_disable_merge_attribute = static_cast( - m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight); - } - - constexpr void UpdateDeviceDisableMergeStateForShareLeft( - [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { - if (left) { - m_disable_merge_attribute = static_cast( - m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft); - const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count; - ASSERT(new_device_disable_merge_left_count > 0); - } - } - - constexpr void UpdateDeviceDisableMergeStateForShareRight( - [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { - if (right) { - m_disable_merge_attribute = static_cast( - m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight); - const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count; - ASSERT(new_device_disable_merge_right_count > 0); - } - } - - constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left, - bool right) { - this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right); - this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right); - } - - constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, - bool right) { - // We must either be shared or have a zero lock count. - ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || - m_device_use_count == 0); - - // Share. 
- const u16 new_count = ++m_device_use_count; - ASSERT(new_count > 0); - - m_attribute = static_cast(m_attribute | KMemoryAttribute::DeviceShared); - - this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right); - } - - constexpr void UpdateDeviceDisableMergeStateForUnshareLeft( - [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { - - if (left) { - if (!m_device_disable_merge_left_count) { - return; - } - --m_device_disable_merge_left_count; - } - - m_device_disable_merge_left_count = - std::min(m_device_disable_merge_left_count, m_device_use_count); - - if (m_device_disable_merge_left_count == 0) { - m_disable_merge_attribute = static_cast( - m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft); - } - } - - constexpr void UpdateDeviceDisableMergeStateForUnshareRight( - [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { - if (right) { - const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--; - ASSERT(old_device_disable_merge_right_count > 0); - if (old_device_disable_merge_right_count == 1) { - m_disable_merge_attribute = static_cast( - m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight); - } - } - } - - constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left, - bool right) { - this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right); - this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right); - } - - constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, - bool right) { - // We must be shared. - ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); - - // Unhare. - const u16 old_count = m_device_use_count--; - ASSERT(old_count > 0); - - if (old_count == 1) { - m_attribute = - static_cast(m_attribute & ~KMemoryAttribute::DeviceShared); - } - - this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right); - } - - constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left, - bool right) { - - // We must be shared. - ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); - - // Unhare. - const u16 old_count = m_device_use_count--; - ASSERT(old_count > 0); - - if (old_count == 1) { - m_attribute = - static_cast(m_attribute & ~KMemoryAttribute::DeviceShared); - } - - this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right); - } - - constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { - // We must either be locked or have a zero lock count. - ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked || - m_ipc_lock_count == 0); - - // Lock. - const u16 new_lock_count = ++m_ipc_lock_count; - ASSERT(new_lock_count > 0); - - // If this is our first lock, update our permissions. 
- if (new_lock_count == 1) { - ASSERT(m_original_permission == KMemoryPermission::None); - ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) == - (m_permission | KMemoryPermission::NotMapped)); - ASSERT((m_permission & KMemoryPermission::UserExecute) != - KMemoryPermission::UserExecute || - (new_perm == KMemoryPermission::UserRead)); - m_original_permission = m_permission; - m_permission = static_cast( - (new_perm & KMemoryPermission::IpcLockChangeMask) | - (m_original_permission & ~KMemoryPermission::IpcLockChangeMask)); - } - m_attribute = static_cast(m_attribute | KMemoryAttribute::IpcLocked); - - if (left) { - m_disable_merge_attribute = static_cast( - m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft); - const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count; - ASSERT(new_ipc_disable_merge_count > 0); - } - } - - constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left, - [[maybe_unused]] bool right) { - // We must be locked. - ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked); - - // Unlock. - const u16 old_lock_count = m_ipc_lock_count--; - ASSERT(old_lock_count > 0); - - // If this is our last unlock, update our permissions. - if (old_lock_count == 1) { - ASSERT(m_original_permission != KMemoryPermission::None); - m_permission = m_original_permission; - m_original_permission = KMemoryPermission::None; - m_attribute = static_cast(m_attribute & ~KMemoryAttribute::IpcLocked); - } - - if (left) { - const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--; - ASSERT(old_ipc_disable_merge_count > 0); - if (old_ipc_disable_merge_count == 1) { - m_disable_merge_attribute = static_cast( - m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft); - } - } - } - - constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const { - return m_disable_merge_attribute; + return block; } }; static_assert(std::is_trivially_destructible::value); diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp index cf4c1e371..3ddb9984f 100755 --- a/src/core/hle/kernel/k_memory_block_manager.cpp +++ b/src/core/hle/kernel/k_memory_block_manager.cpp @@ -2,336 +2,221 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include "core/hle/kernel/k_memory_block_manager.h" +#include "core/hle/kernel/memory_types.h" namespace Kernel { -KMemoryBlockManager::KMemoryBlockManager() = default; - -Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) { - // Allocate a block to encapsulate the address space, insert it into the tree. - KMemoryBlock* start_block = slab_manager->Allocate(); - R_UNLESS(start_block != nullptr, ResultOutOfResource); - - // Set our start and end. - m_start_address = st; - m_end_address = nd; - ASSERT(Common::IsAligned(m_start_address, PageSize)); - ASSERT(Common::IsAligned(m_end_address, PageSize)); - - // Initialize and insert the block. 
- start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize, - KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None); - m_memory_block_tree.insert(*start_block); - - R_SUCCEED(); +KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_) + : start_addr{start_addr_}, end_addr{end_addr_} { + const u64 num_pages{(end_addr - start_addr) / PageSize}; + memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None); } -void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager, - HostUnmapCallback&& host_unmap_callback) { - // Erase every block until we have none left. - auto it = m_memory_block_tree.begin(); - while (it != m_memory_block_tree.end()) { - KMemoryBlock* block = std::addressof(*it); - it = m_memory_block_tree.erase(it); - slab_manager->Free(block); - host_unmap_callback(block->GetAddress(), block->GetSize()); +KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) { + auto node{memory_block_tree.begin()}; + while (node != end()) { + const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()}; + if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) { + return node; + } + node = std::next(node); + } + return end(); +} + +VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages, + std::size_t num_pages, std::size_t align, + std::size_t offset, std::size_t guard_pages) { + if (num_pages == 0) { + return {}; } - ASSERT(m_memory_block_tree.empty()); -} + const VAddr region_end{region_start + region_num_pages * PageSize}; + const VAddr region_last{region_end - 1}; + for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) { + const auto info{it->GetMemoryInfo()}; + if (region_last < info.GetAddress()) { + break; + } -VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages, - size_t num_pages, size_t alignment, size_t offset, - size_t guard_pages) const { - if (num_pages > 0) { - const VAddr region_end = region_start + region_num_pages * PageSize; - const VAddr region_last = region_end - 1; - for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend(); - it++) { - const KMemoryInfo info = it->GetMemoryInfo(); - if (region_last < info.GetAddress()) { - break; - } - if (info.m_state != KMemoryState::Free) { - continue; - } + if (info.state != KMemoryState::Free) { + continue; + } - VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress(); - area += guard_pages * PageSize; + VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()}; + area += guard_pages * PageSize; - const VAddr offset_area = Common::AlignDown(area, alignment) + offset; - area = (area <= offset_area) ? offset_area : offset_area + alignment; + const VAddr offset_area{Common::AlignDown(area, align) + offset}; + area = (area <= offset_area) ? 
offset_area : offset_area + align; - const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize; - const VAddr area_last = area_end - 1; + const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize}; + const VAddr area_last{area_end - 1}; - if (info.GetAddress() <= area && area < area_last && area_last <= region_last && - area_last <= info.GetLastAddress()) { - return area; - } + if (info.GetAddress() <= area && area < area_last && area_last <= region_last && + area_last <= info.GetLastAddress()) { + return area; } } return {}; } -void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, - VAddr address, size_t num_pages) { - // Find the iterator now that we've updated. - iterator it = this->FindIterator(address); - if (address != m_start_address) { - it--; - } +void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state, + KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, + KMemoryState state, KMemoryPermission perm, + KMemoryAttribute attribute) { + const VAddr update_end_addr{addr + num_pages * PageSize}; + iterator node{memory_block_tree.begin()}; - // Coalesce blocks that we can. - while (true) { - iterator prev = it++; - if (it == m_memory_block_tree.end()) { + prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped; + + while (node != memory_block_tree.end()) { + KMemoryBlock* block{&(*node)}; + iterator next_node{std::next(node)}; + const VAddr cur_addr{block->GetAddress()}; + const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; + + if (addr < cur_end_addr && cur_addr < update_end_addr) { + if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) { + node = next_node; + continue; + } + + iterator new_node{node}; + if (addr > cur_addr) { + memory_block_tree.insert(node, block->Split(addr)); + } + + if (update_end_addr < cur_end_addr) { + new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); + } + + new_node->Update(state, perm, attribute); + + MergeAdjacent(new_node, next_node); + } + + if (cur_end_addr - 1 >= update_end_addr - 1) { break; } - if (prev->CanMergeWith(*it)) { - KMemoryBlock* block = std::addressof(*it); - m_memory_block_tree.erase(it); - prev->Add(*block); - allocator->Free(block); - it = prev; + node = next_node; + } +} + +void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state, + KMemoryPermission perm, KMemoryAttribute attribute) { + const VAddr update_end_addr{addr + num_pages * PageSize}; + iterator node{memory_block_tree.begin()}; + + while (node != memory_block_tree.end()) { + KMemoryBlock* block{&(*node)}; + iterator next_node{std::next(node)}; + const VAddr cur_addr{block->GetAddress()}; + const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; + + if (addr < cur_end_addr && cur_addr < update_end_addr) { + iterator new_node{node}; + + if (addr > cur_addr) { + memory_block_tree.insert(node, block->Split(addr)); + } + + if (update_end_addr < cur_end_addr) { + new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); + } + + new_node->Update(state, perm, attribute); + + MergeAdjacent(new_node, next_node); } - if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) { + if (cur_end_addr - 1 >= update_end_addr - 1) { break; } + + node = next_node; } } -void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, - size_t num_pages, KMemoryState state, KMemoryPermission perm, - KMemoryAttribute attr, - 
KMemoryBlockDisableMergeAttribute set_disable_attr, - KMemoryBlockDisableMergeAttribute clear_disable_attr) { - // Ensure for auditing that we never end up with an invalid tree. - KScopedMemoryBlockManagerAuditor auditor(this); - ASSERT(Common::IsAligned(address, PageSize)); - ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == - KMemoryAttribute::None); - - VAddr cur_address = address; - size_t remaining_pages = num_pages; - iterator it = this->FindIterator(address); - - while (remaining_pages > 0) { - const size_t remaining_size = remaining_pages * PageSize; - KMemoryInfo cur_info = it->GetMemoryInfo(); - if (it->HasProperties(state, perm, attr)) { - // If we already have the right properties, just advance. - if (cur_address + remaining_size < cur_info.GetEndAddress()) { - remaining_pages = 0; - cur_address += remaining_size; - } else { - remaining_pages = - (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; - cur_address = cur_info.GetEndAddress(); - } - } else { - // If we need to, create a new block before and insert it. - if (cur_info.GetAddress() != cur_address) { - KMemoryBlock* new_block = allocator->Allocate(); - - it->Split(new_block, cur_address); - it = m_memory_block_tree.insert(*new_block); - it++; - - cur_info = it->GetMemoryInfo(); - cur_address = cur_info.GetAddress(); - } - - // If we need to, create a new block after and insert it. - if (cur_info.GetSize() > remaining_size) { - KMemoryBlock* new_block = allocator->Allocate(); - - it->Split(new_block, cur_address + remaining_size); - it = m_memory_block_tree.insert(*new_block); - - cur_info = it->GetMemoryInfo(); - } - - // Update block state. - it->Update(state, perm, attr, cur_address == address, static_cast(set_disable_attr), - static_cast(clear_disable_attr)); - cur_address += cur_info.GetSize(); - remaining_pages -= cur_info.GetNumPages(); - } - it++; - } - - this->CoalesceForUpdate(allocator, address, num_pages); -} - -void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, - VAddr address, size_t num_pages, KMemoryState test_state, - KMemoryPermission test_perm, KMemoryAttribute test_attr, - KMemoryState state, KMemoryPermission perm, - KMemoryAttribute attr) { - // Ensure for auditing that we never end up with an invalid tree. - KScopedMemoryBlockManagerAuditor auditor(this); - ASSERT(Common::IsAligned(address, PageSize)); - ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == - KMemoryAttribute::None); - - VAddr cur_address = address; - size_t remaining_pages = num_pages; - iterator it = this->FindIterator(address); - - while (remaining_pages > 0) { - const size_t remaining_size = remaining_pages * PageSize; - KMemoryInfo cur_info = it->GetMemoryInfo(); - if (it->HasProperties(test_state, test_perm, test_attr) && - !it->HasProperties(state, perm, attr)) { - // If we need to, create a new block before and insert it. - if (cur_info.GetAddress() != cur_address) { - KMemoryBlock* new_block = allocator->Allocate(); - - it->Split(new_block, cur_address); - it = m_memory_block_tree.insert(*new_block); - it++; - - cur_info = it->GetMemoryInfo(); - cur_address = cur_info.GetAddress(); - } - - // If we need to, create a new block after and insert it. - if (cur_info.GetSize() > remaining_size) { - KMemoryBlock* new_block = allocator->Allocate(); - - it->Split(new_block, cur_address + remaining_size); - it = m_memory_block_tree.insert(*new_block); - - cur_info = it->GetMemoryInfo(); - } - - // Update block state. 
- it->Update(state, perm, attr, false, 0, 0); - cur_address += cur_info.GetSize(); - remaining_pages -= cur_info.GetNumPages(); - } else { - // If we already have the right properties, just advance. - if (cur_address + remaining_size < cur_info.GetEndAddress()) { - remaining_pages = 0; - cur_address += remaining_size; - } else { - remaining_pages = - (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; - cur_address = cur_info.GetEndAddress(); - } - } - it++; - } - - this->CoalesceForUpdate(allocator, address, num_pages); -} - -void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, - size_t num_pages, MemoryBlockLockFunction lock_func, +void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, KMemoryPermission perm) { - // Ensure for auditing that we never end up with an invalid tree. - KScopedMemoryBlockManagerAuditor auditor(this); - ASSERT(Common::IsAligned(address, PageSize)); + const VAddr update_end_addr{addr + num_pages * PageSize}; + iterator node{memory_block_tree.begin()}; - VAddr cur_address = address; - size_t remaining_pages = num_pages; - iterator it = this->FindIterator(address); + while (node != memory_block_tree.end()) { + KMemoryBlock* block{&(*node)}; + iterator next_node{std::next(node)}; + const VAddr cur_addr{block->GetAddress()}; + const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; - const VAddr end_address = address + (num_pages * PageSize); + if (addr < cur_end_addr && cur_addr < update_end_addr) { + iterator new_node{node}; - while (remaining_pages > 0) { - const size_t remaining_size = remaining_pages * PageSize; - KMemoryInfo cur_info = it->GetMemoryInfo(); + if (addr > cur_addr) { + memory_block_tree.insert(node, block->Split(addr)); + } - // If we need to, create a new block before and insert it. - if (cur_info.m_address != cur_address) { - KMemoryBlock* new_block = allocator->Allocate(); + if (update_end_addr < cur_end_addr) { + new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); + } - it->Split(new_block, cur_address); - it = m_memory_block_tree.insert(*new_block); - it++; + lock_func(new_node, perm); - cur_info = it->GetMemoryInfo(); - cur_address = cur_info.GetAddress(); + MergeAdjacent(new_node, next_node); } - if (cur_info.GetSize() > remaining_size) { - // If we need to, create a new block after and insert it. - KMemoryBlock* new_block = allocator->Allocate(); - - it->Split(new_block, cur_address + remaining_size); - it = m_memory_block_tree.insert(*new_block); - - cur_info = it->GetMemoryInfo(); + if (cur_end_addr - 1 >= update_end_addr - 1) { + break; } - // Call the locked update function. - (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address, - cur_info.GetEndAddress() == end_address); - cur_address += cur_info.GetSize(); - remaining_pages -= cur_info.GetNumPages(); - it++; + node = next_node; } - - this->CoalesceForUpdate(allocator, address, num_pages); } -// Debug. -bool KMemoryBlockManager::CheckState() const { - // Loop over every block, ensuring that we are sorted and coalesced. 
- auto it = m_memory_block_tree.cbegin(); - auto prev = it++; - while (it != m_memory_block_tree.cend()) { - const KMemoryInfo prev_info = prev->GetMemoryInfo(); - const KMemoryInfo cur_info = it->GetMemoryInfo(); +void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) { + const_iterator it{FindIterator(start)}; + KMemoryInfo info{}; + do { + info = it->GetMemoryInfo(); + func(info); + it = std::next(it); + } while (info.addr + info.size - 1 < end - 1 && it != cend()); +} - // Sequential blocks which can be merged should be merged. - if (prev->CanMergeWith(*it)) { - return false; +void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) { + KMemoryBlock* block{&(*it)}; + + auto EraseIt = [&](const iterator it_to_erase) { + if (next_it == it_to_erase) { + next_it = std::next(next_it); } + memory_block_tree.erase(it_to_erase); + }; - // Sequential blocks should be sequential. - if (prev_info.GetEndAddress() != cur_info.GetAddress()) { - return false; - } + if (it != memory_block_tree.begin()) { + KMemoryBlock* prev{&(*std::prev(it))}; - // If the block is ipc locked, it must have a count. - if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None && - cur_info.m_ipc_lock_count == 0) { - return false; - } + if (block->HasSameProperties(*prev)) { + const iterator prev_it{std::prev(it)}; - // If the block is device shared, it must have a count. - if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None && - cur_info.m_device_use_count == 0) { - return false; - } + prev->Add(block->GetNumPages()); + EraseIt(it); - // Advance the iterator. - prev = it++; - } - - // Our loop will miss checking the last block, potentially, so check it. - if (prev != m_memory_block_tree.cend()) { - const KMemoryInfo prev_info = prev->GetMemoryInfo(); - // If the block is ipc locked, it must have a count. - if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None && - prev_info.m_ipc_lock_count == 0) { - return false; - } - - // If the block is device shared, it must have a count. - if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None && - prev_info.m_device_use_count == 0) { - return false; + it = prev_it; + block = prev; } } - return true; + if (it != cend()) { + const KMemoryBlock* const next{&(*std::next(it))}; + + if (block->HasSameProperties(*next)) { + block->Add(next->GetNumPages()); + EraseIt(std::next(it)); + } + } } } // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h index 99705fc8c..e14741b89 100755 --- a/src/core/hle/kernel/k_memory_block_manager.h +++ b/src/core/hle/kernel/k_memory_block_manager.h @@ -4,154 +4,63 @@ #pragma once #include +#include -#include "common/common_funcs.h" #include "common/common_types.h" -#include "core/hle/kernel/k_dynamic_resource_manager.h" #include "core/hle/kernel/k_memory_block.h" namespace Kernel { -class KMemoryBlockManagerUpdateAllocator { -public: - static constexpr size_t MaxBlocks = 2; - -private: - KMemoryBlock* m_blocks[MaxBlocks]; - size_t m_index; - KMemoryBlockSlabManager* m_slab_manager; - -private: - Result Initialize(size_t num_blocks) { - // Check num blocks. - ASSERT(num_blocks <= MaxBlocks); - - // Set index. - m_index = MaxBlocks - num_blocks; - - // Allocate the blocks. 
- for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) { - m_blocks[m_index + i] = m_slab_manager->Allocate(); - R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource); - } - - R_SUCCEED(); - } - -public: - KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm, - size_t num_blocks = MaxBlocks) - : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) { - *out_result = this->Initialize(num_blocks); - } - - ~KMemoryBlockManagerUpdateAllocator() { - for (const auto& block : m_blocks) { - if (block != nullptr) { - m_slab_manager->Free(block); - } - } - } - - KMemoryBlock* Allocate() { - ASSERT(m_index < MaxBlocks); - ASSERT(m_blocks[m_index] != nullptr); - KMemoryBlock* block = nullptr; - std::swap(block, m_blocks[m_index++]); - return block; - } - - void Free(KMemoryBlock* block) { - ASSERT(m_index <= MaxBlocks); - ASSERT(block != nullptr); - if (m_index == 0) { - m_slab_manager->Free(block); - } else { - m_blocks[--m_index] = block; - } - } -}; - class KMemoryBlockManager final { public: - using MemoryBlockTree = - Common::IntrusiveRedBlackTreeBaseTraits::TreeType; - using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left, - bool right); + using MemoryBlockTree = std::list; using iterator = MemoryBlockTree::iterator; using const_iterator = MemoryBlockTree::const_iterator; public: - KMemoryBlockManager(); - - using HostUnmapCallback = std::function; - - Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager); - void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback); + KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_); iterator end() { - return m_memory_block_tree.end(); + return memory_block_tree.end(); } const_iterator end() const { - return m_memory_block_tree.end(); + return memory_block_tree.end(); } const_iterator cend() const { - return m_memory_block_tree.cend(); + return memory_block_tree.cend(); } - VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, - size_t alignment, size_t offset, size_t guard_pages) const; + iterator FindIterator(VAddr addr); - void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages, - KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, - KMemoryBlockDisableMergeAttribute set_disable_attr, - KMemoryBlockDisableMergeAttribute clear_disable_attr); - void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages, - MemoryBlockLockFunction lock_func, KMemoryPermission perm); + VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, + std::size_t align, std::size_t offset, std::size_t guard_pages); - void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, - size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, - KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, - KMemoryAttribute attr); + void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state, + KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state, + KMemoryPermission perm, KMemoryAttribute attribute); - iterator FindIterator(VAddr address) const { - return m_memory_block_tree.find(KMemoryBlock( - address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None)); + void Update(VAddr addr, std::size_t num_pages, KMemoryState state, + KMemoryPermission perm = KMemoryPermission::None, + KMemoryAttribute attribute 
= KMemoryAttribute::None); + + using LockFunc = std::function; + void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, + KMemoryPermission perm); + + using IterateFunc = std::function; + void IterateForRange(VAddr start, VAddr end, IterateFunc&& func); + + KMemoryBlock& FindBlock(VAddr addr) { + return *FindIterator(addr); } - const KMemoryBlock* FindBlock(VAddr address) const { - if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) { - return std::addressof(*it); - } - - return nullptr; - } - - // Debug. - bool CheckState() const; - private: - void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, - size_t num_pages); + void MergeAdjacent(iterator it, iterator& next_it); - MemoryBlockTree m_memory_block_tree; - VAddr m_start_address{}; - VAddr m_end_address{}; -}; + [[maybe_unused]] const VAddr start_addr; + [[maybe_unused]] const VAddr end_addr; -class KScopedMemoryBlockManagerAuditor { -private: - KMemoryBlockManager* m_manager; - -public: - explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) { - ASSERT(m_manager->CheckState()); - } - explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m) - : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {} - ~KScopedMemoryBlockManagerAuditor() { - ASSERT(m_manager->CheckState()); - } + MemoryBlockTree memory_block_tree; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp index 646711505..5b0a9963a 100755 --- a/src/core/hle/kernel/k_memory_manager.cpp +++ b/src/core/hle/kernel/k_memory_manager.cpp @@ -331,7 +331,7 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag // Set all the allocated memory. 
     for (const auto& block : out->Nodes()) {
-        std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
+        std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern,
                     block.GetSize());
     }

diff --git a/src/core/hle/kernel/k_page_buffer.cpp b/src/core/hle/kernel/k_page_buffer.cpp
index 0c16dded4..1a0bf4439 100755
--- a/src/core/hle/kernel/k_page_buffer.cpp
+++ b/src/core/hle/kernel/k_page_buffer.cpp
@@ -12,7 +12,7 @@ namespace Kernel {
 KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
     ASSERT(Common::IsAligned(phys_addr, PageSize));
-    return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
+    return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr));
 }

 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 307e491cb..d975de844 100755
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -25,7 +25,7 @@ namespace {
 using namespace Common::Literals;

-constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
+constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
     switch (as_type) {
     case FileSys::ProgramAddressSpaceType::Is32Bit:
     case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
@@ -43,29 +43,27 @@ constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType a
 } // namespace

 KPageTable::KPageTable(Core::System& system_)
-    : m_general_lock{system_.Kernel()},
-      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}
+    : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {}

 KPageTable::~KPageTable() = default;

 Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
-                                        VAddr code_addr, size_t code_size,
-                                        KMemoryBlockSlabManager* mem_block_slab_manager,
+                                        VAddr code_addr, std::size_t code_size,
                                         KMemoryManager::Pool pool) {
     const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
-        return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
+        return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type);
     };
     const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) {
-        return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
+        return KAddressSpaceInfo::GetAddressSpaceSize(address_space_width, type);
     };

     // Set our width and heap/alias sizes
-    m_address_space_width = GetAddressSpaceWidthFromType(as_type);
+    address_space_width = GetAddressSpaceWidthFromType(as_type);
     const VAddr start = 0;
-    const VAddr end{1ULL << m_address_space_width};
-    size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
-    size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
+    const VAddr end{1ULL << address_space_width};
+    std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
+    std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};

     ASSERT(code_addr < code_addr + code_size);
     ASSERT(code_addr + code_size - 1 <= end - 1);
@@ -77,65 +75,66 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
     }

     // Set code regions and determine remaining
-    constexpr size_t RegionAlignment{2_MiB};
+    constexpr std::size_t RegionAlignment{2_MiB};
     VAddr process_code_start{};
     VAddr process_code_end{};
-    size_t stack_region_size{};
-    size_t kernel_map_region_size{};
+    std::size_t stack_region_size{};
+    std::size_t
kernel_map_region_size{}; - if (m_address_space_width == 39) { + if (address_space_width == 39) { alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack); kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); - m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); - m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); - m_alias_code_region_start = m_code_region_start; - m_alias_code_region_end = m_code_region_end; + code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); + code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); + alias_code_region_start = code_region_start; + alias_code_region_end = code_region_end; process_code_start = Common::AlignDown(code_addr, RegionAlignment); process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment); } else { stack_region_size = 0; kernel_map_region_size = 0; - m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); - m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); - m_stack_region_start = m_code_region_start; - m_alias_code_region_start = m_code_region_start; - m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + - GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); - m_stack_region_end = m_code_region_end; - m_kernel_map_region_start = m_code_region_start; - m_kernel_map_region_end = m_code_region_end; - process_code_start = m_code_region_start; - process_code_end = m_code_region_end; + code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); + code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); + stack_region_start = code_region_start; + alias_code_region_start = code_region_start; + alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + + GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); + stack_region_end = code_region_end; + kernel_map_region_start = code_region_start; + kernel_map_region_end = code_region_end; + process_code_start = code_region_start; + process_code_end = code_region_end; } // Set other basic fields - m_enable_aslr = enable_aslr; - m_enable_device_address_space_merge = false; - m_address_space_start = start; - m_address_space_end = end; - m_is_kernel = false; - m_memory_block_slab_manager = mem_block_slab_manager; + is_aslr_enabled = enable_aslr; + address_space_start = start; + address_space_end = end; + is_kernel = false; // Determine the region we can place our undetermineds in VAddr alloc_start{}; - size_t alloc_size{}; - if ((process_code_start - m_code_region_start) >= (end - process_code_end)) { - alloc_start = m_code_region_start; - alloc_size = process_code_start - m_code_region_start; + std::size_t alloc_size{}; + if ((process_code_start - code_region_start) >= (end - process_code_end)) { + alloc_start = code_region_start; + alloc_size = process_code_start - code_region_start; } else { alloc_start = process_code_end; alloc_size = end - process_code_end; } - const size_t needed_size = - (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size); - R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory); + const std::size_t needed_size{ + (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)}; + if (alloc_size < needed_size) { + ASSERT(false); + 
return ResultOutOfMemory; + } - const size_t remaining_size{alloc_size - needed_size}; + const std::size_t remaining_size{alloc_size - needed_size}; // Determine random placements for each region - size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; + std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; if (enable_aslr) { alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment; @@ -148,130 +147,117 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type } // Setup heap and alias regions - m_alias_region_start = alloc_start + alias_rnd; - m_alias_region_end = m_alias_region_start + alias_region_size; - m_heap_region_start = alloc_start + heap_rnd; - m_heap_region_end = m_heap_region_start + heap_region_size; + alias_region_start = alloc_start + alias_rnd; + alias_region_end = alias_region_start + alias_region_size; + heap_region_start = alloc_start + heap_rnd; + heap_region_end = heap_region_start + heap_region_size; if (alias_rnd <= heap_rnd) { - m_heap_region_start += alias_region_size; - m_heap_region_end += alias_region_size; + heap_region_start += alias_region_size; + heap_region_end += alias_region_size; } else { - m_alias_region_start += heap_region_size; - m_alias_region_end += heap_region_size; + alias_region_start += heap_region_size; + alias_region_end += heap_region_size; } // Setup stack region if (stack_region_size) { - m_stack_region_start = alloc_start + stack_rnd; - m_stack_region_end = m_stack_region_start + stack_region_size; + stack_region_start = alloc_start + stack_rnd; + stack_region_end = stack_region_start + stack_region_size; if (alias_rnd < stack_rnd) { - m_stack_region_start += alias_region_size; - m_stack_region_end += alias_region_size; + stack_region_start += alias_region_size; + stack_region_end += alias_region_size; } else { - m_alias_region_start += stack_region_size; - m_alias_region_end += stack_region_size; + alias_region_start += stack_region_size; + alias_region_end += stack_region_size; } if (heap_rnd < stack_rnd) { - m_stack_region_start += heap_region_size; - m_stack_region_end += heap_region_size; + stack_region_start += heap_region_size; + stack_region_end += heap_region_size; } else { - m_heap_region_start += stack_region_size; - m_heap_region_end += stack_region_size; + heap_region_start += stack_region_size; + heap_region_end += stack_region_size; } } // Setup kernel map region if (kernel_map_region_size) { - m_kernel_map_region_start = alloc_start + kmap_rnd; - m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size; + kernel_map_region_start = alloc_start + kmap_rnd; + kernel_map_region_end = kernel_map_region_start + kernel_map_region_size; if (alias_rnd < kmap_rnd) { - m_kernel_map_region_start += alias_region_size; - m_kernel_map_region_end += alias_region_size; + kernel_map_region_start += alias_region_size; + kernel_map_region_end += alias_region_size; } else { - m_alias_region_start += kernel_map_region_size; - m_alias_region_end += kernel_map_region_size; + alias_region_start += kernel_map_region_size; + alias_region_end += kernel_map_region_size; } if (heap_rnd < kmap_rnd) { - m_kernel_map_region_start += heap_region_size; - m_kernel_map_region_end += heap_region_size; + kernel_map_region_start += heap_region_size; + kernel_map_region_end += heap_region_size; } else { - m_heap_region_start += kernel_map_region_size; - m_heap_region_end += kernel_map_region_size; + heap_region_start += kernel_map_region_size; + heap_region_end 
+= kernel_map_region_size;
         }

         if (stack_region_size) {
             if (stack_rnd < kmap_rnd) {
-                m_kernel_map_region_start += stack_region_size;
-                m_kernel_map_region_end += stack_region_size;
+                kernel_map_region_start += stack_region_size;
+                kernel_map_region_end += stack_region_size;
             } else {
-                m_stack_region_start += kernel_map_region_size;
-                m_stack_region_end += kernel_map_region_size;
+                stack_region_start += kernel_map_region_size;
+                stack_region_end += kernel_map_region_size;
             }
         }
     }

     // Set heap members
-    m_current_heap_end = m_heap_region_start;
-    m_max_heap_size = 0;
-    m_max_physical_memory_size = 0;
+    current_heap_end = heap_region_start;
+    max_heap_size = 0;
+    max_physical_memory_size = 0;

     // Ensure that we regions inside our address space
     auto IsInAddressSpace = [&](VAddr addr) {
-        return m_address_space_start <= addr && addr <= m_address_space_end;
+        return address_space_start <= addr && addr <= address_space_end;
     };
-    ASSERT(IsInAddressSpace(m_alias_region_start));
-    ASSERT(IsInAddressSpace(m_alias_region_end));
-    ASSERT(IsInAddressSpace(m_heap_region_start));
-    ASSERT(IsInAddressSpace(m_heap_region_end));
-    ASSERT(IsInAddressSpace(m_stack_region_start));
-    ASSERT(IsInAddressSpace(m_stack_region_end));
-    ASSERT(IsInAddressSpace(m_kernel_map_region_start));
-    ASSERT(IsInAddressSpace(m_kernel_map_region_end));
+    ASSERT(IsInAddressSpace(alias_region_start));
+    ASSERT(IsInAddressSpace(alias_region_end));
+    ASSERT(IsInAddressSpace(heap_region_start));
+    ASSERT(IsInAddressSpace(heap_region_end));
+    ASSERT(IsInAddressSpace(stack_region_start));
+    ASSERT(IsInAddressSpace(stack_region_end));
+    ASSERT(IsInAddressSpace(kernel_map_region_start));
+    ASSERT(IsInAddressSpace(kernel_map_region_end));

     // Ensure that we selected regions that don't overlap
-    const VAddr alias_start{m_alias_region_start};
-    const VAddr alias_last{m_alias_region_end - 1};
-    const VAddr heap_start{m_heap_region_start};
-    const VAddr heap_last{m_heap_region_end - 1};
-    const VAddr stack_start{m_stack_region_start};
-    const VAddr stack_last{m_stack_region_end - 1};
-    const VAddr kmap_start{m_kernel_map_region_start};
-    const VAddr kmap_last{m_kernel_map_region_end - 1};
+    const VAddr alias_start{alias_region_start};
+    const VAddr alias_last{alias_region_end - 1};
+    const VAddr heap_start{heap_region_start};
+    const VAddr heap_last{heap_region_end - 1};
+    const VAddr stack_start{stack_region_start};
+    const VAddr stack_last{stack_region_end - 1};
+    const VAddr kmap_start{kernel_map_region_start};
+    const VAddr kmap_last{kernel_map_region_end - 1};
     ASSERT(alias_last < heap_start || heap_last < alias_start);
     ASSERT(alias_last < stack_start || stack_last < alias_start);
     ASSERT(alias_last < kmap_start || kmap_last < alias_start);
     ASSERT(heap_last < stack_start || stack_last < heap_start);
     ASSERT(heap_last < kmap_start || kmap_last < heap_start);

-    m_current_heap_end = m_heap_region_start;
-    m_max_heap_size = 0;
-    m_mapped_physical_memory_size = 0;
-    m_memory_pool = pool;
+    current_heap_end = heap_region_start;
+    max_heap_size = 0;
+    mapped_physical_memory_size = 0;
+    memory_pool = pool;

-    m_page_table_impl = std::make_unique<Common::PageTable>();
-    m_page_table_impl->Resize(m_address_space_width, PageBits);
+    page_table_impl.Resize(address_space_width, PageBits);

-    // Initialize our memory block manager.
-    R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
-                                               m_memory_block_slab_manager));
+    return InitializeMemoryLayout(start, end);
 }

-void KPageTable::Finalize() {
-    // Finalize memory blocks.
- m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) { - m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size); - }); - - // Close the backing page table, as the destructor is not called for guest objects. - m_page_table_impl.reset(); -} - -Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state, +Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state, KMemoryPermission perm) { const u64 size{num_pages * PageSize}; @@ -279,76 +265,52 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Verify that the destination memory is unmapped. R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); - - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager); - - // Allocate and open. KPageGroup pg; - R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( + R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( &pg, num_pages, - KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option))); + KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option))); R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); - // Update the blocks. - m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + block_manager->Update(addr, num_pages, state, perm); - R_SUCCEED(); + return ResultSuccess; } -Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) { +Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) { // Validate the mapping request. R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), ResultInvalidMemoryRegion); // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Verify that the source memory is normal heap. KMemoryState src_state{}; KMemoryPermission src_perm{}; - size_t num_src_allocator_blocks{}; + std::size_t num_src_allocator_blocks{}; R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks, src_address, size, KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)); // Verify that the destination memory is unmapped. - size_t num_dst_allocator_blocks{}; + std::size_t num_dst_allocator_blocks{}; R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); - // Create an update allocator for the source. - Result src_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), - m_memory_block_slab_manager, - num_src_allocator_blocks); - R_TRY(src_allocator_result); - - // Create an update allocator for the destination. 
- Result dst_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), - m_memory_block_slab_manager, - num_dst_allocator_blocks); - R_TRY(dst_allocator_result); - // Map the code memory. { // Determine the number of pages being operated on. - const size_t num_pages = size / PageSize; + const std::size_t num_pages = size / PageSize; // Create page groups for the memory being mapped. KPageGroup pg; @@ -373,37 +335,33 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si unprot_guard.Cancel(); // Apply the memory block updates. - m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, - src_state, new_perm, KMemoryAttribute::Locked, - KMemoryBlockDisableMergeAttribute::Locked, - KMemoryBlockDisableMergeAttribute::None); - m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, - KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + block_manager->Update(src_address, num_pages, src_state, new_perm, + KMemoryAttribute::Locked); + block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm, + KMemoryAttribute::None); } - R_SUCCEED(); + return ResultSuccess; } -Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size, +Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size, ICacheInvalidationStrategy icache_invalidation_strategy) { // Validate the mapping request. R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), ResultInvalidMemoryRegion); // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Verify that the source memory is locked normal heap. - size_t num_src_allocator_blocks{}; + std::size_t num_src_allocator_blocks{}; R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::Locked)); // Verify that the destination memory is aliasable code. - size_t num_dst_allocator_blocks{}; + std::size_t num_dst_allocator_blocks{}; R_TRY(this->CheckMemoryStateContiguous( std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, @@ -412,7 +370,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t // Determine whether any pages being unmapped are code. bool any_code_pages = false; { - KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address); + KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address); while (true) { // Get the memory info. 
const KMemoryInfo info = it->GetMemoryInfo(); @@ -438,9 +396,9 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t SCOPE_EXIT({ if (reprotected_pages && any_code_pages) { if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) { - m_system.InvalidateCpuInstructionCacheRange(dst_address, size); + system.InvalidateCpuInstructionCacheRange(dst_address, size); } else { - m_system.InvalidateCpuInstructionCaches(); + system.InvalidateCpuInstructionCaches(); } } }); @@ -448,21 +406,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t // Unmap. { // Determine the number of pages being operated on. - const size_t num_pages = size / PageSize; - - // Create an update allocator for the source. - Result src_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), - m_memory_block_slab_manager, - num_src_allocator_blocks); - R_TRY(src_allocator_result); - - // Create an update allocator for the destination. - Result dst_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), - m_memory_block_slab_manager, - num_dst_allocator_blocks); - R_TRY(dst_allocator_result); + const std::size_t num_pages = size / PageSize; // Unmap the aliased copy of the pages. R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap)); @@ -472,34 +416,73 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t OperationType::ChangePermissions)); // Apply the memory block updates. - m_memory_block_manager.Update( - std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, - KMemoryPermission::None, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); - m_memory_block_manager.Update( - std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal, - KMemoryPermission::UserReadWrite, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); + block_manager->Update(dst_address, num_pages, KMemoryState::None); + block_manager->Update(src_address, num_pages, KMemoryState::Normal, + KMemoryPermission::UserReadWrite); // Note that we reprotected pages. reprotected_pages = true; } - R_SUCCEED(); + return ResultSuccess; } -VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, - size_t alignment, size_t offset, size_t guard_pages) { +VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages, + std::size_t num_pages, std::size_t alignment, std::size_t offset, + std::size_t guard_pages) { VAddr address = 0; if (num_pages <= region_num_pages) { if (this->IsAslrEnabled()) { - UNIMPLEMENTED(); + // Try to directly find a free area up to 8 times. 
+ for (std::size_t i = 0; i < 8; i++) { + const std::size_t random_offset = + KSystemControl::GenerateRandomRange( + 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * + alignment; + const VAddr candidate = + Common::AlignDown((region_start + random_offset), alignment) + offset; + + KMemoryInfo info = this->QueryInfoImpl(candidate); + + if (info.state != KMemoryState::Free) { + continue; + } + if (region_start > candidate) { + continue; + } + if (info.GetAddress() + guard_pages * PageSize > candidate) { + continue; + } + + const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1; + if (candidate_end > info.GetLastAddress()) { + continue; + } + if (candidate_end > region_start + region_num_pages * PageSize - 1) { + continue; + } + + address = candidate; + break; + } + // Fall back to finding the first free area with a random offset. + if (address == 0) { + // NOTE: Nintendo does not account for guard pages here. + // This may theoretically cause an offset to be chosen that cannot be mapped. We + // will account for guard pages. + const std::size_t offset_pages = KSystemControl::GenerateRandomRange( + 0, region_num_pages - num_pages - guard_pages); + address = block_manager->FindFreeArea(region_start + offset_pages * PageSize, + region_num_pages - offset_pages, num_pages, + alignment, offset, guard_pages); + } } + // Find the first free area. if (address == 0) { - address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, - alignment, offset, guard_pages); + address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages, + alignment, offset, guard_pages); } } @@ -517,8 +500,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { // Begin traversal. Common::PageTable::TraversalContext context; Common::PageTable::TraversalEntry next_entry; - R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, addr), - ResultInvalidCurrentMemory); + R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory); // Prepare tracking variables. PAddr cur_addr = next_entry.phys_addr; @@ -526,9 +508,9 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { size_t tot_size = cur_size; // Iterate, adding to group as we go. - const auto& memory_layout = m_system.Kernel().MemoryLayout(); + const auto& memory_layout = system.Kernel().MemoryLayout(); while (tot_size < size) { - R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context), + R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context), ResultInvalidCurrentMemory); if (next_entry.phys_addr != (cur_addr + cur_size)) { @@ -556,7 +538,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory); R_TRY(pg.AddBlock(cur_addr, cur_pages)); - R_SUCCEED(); + return ResultSuccess; } bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) { @@ -564,7 +546,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu const size_t size = num_pages * PageSize; const auto& pg = pg_ll.Nodes(); - const auto& memory_layout = m_system.Kernel().MemoryLayout(); + const auto& memory_layout = system.Kernel().MemoryLayout(); // Empty groups are necessarily invalid. if (pg.empty()) { @@ -591,7 +573,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu // Begin traversal. 
Common::PageTable::TraversalContext context; Common::PageTable::TraversalEntry next_entry; - if (!m_page_table_impl->BeginTraversal(next_entry, context, addr)) { + if (!page_table_impl.BeginTraversal(next_entry, context, addr)) { return false; } @@ -602,7 +584,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu // Iterate, comparing expected to actual. while (tot_size < size) { - if (!m_page_table_impl->ContinueTraversal(next_entry, context)) { + if (!page_table_impl.ContinueTraversal(next_entry, context)) { return false; } @@ -648,11 +630,11 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); } -Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, +Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, VAddr src_addr) { - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); - const size_t num_pages{size / PageSize}; + const std::size_t num_pages{size / PageSize}; // Check that the memory is mapped in the destination process. size_t num_allocator_blocks; @@ -667,51 +649,43 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager, num_allocator_blocks); - R_TRY(allocator_result); - CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); // Apply the memory block update. - m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages, - KMemoryState::Free, KMemoryPermission::None, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal); + block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None); - m_system.InvalidateCpuInstructionCaches(); + system.InvalidateCpuInstructionCaches(); - R_SUCCEED(); + return ResultSuccess; } -Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { +Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { // Lock the physical memory lock. - KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); + KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); // Calculate the last address for convenience. const VAddr last_address = address + size - 1; // Define iteration variables. VAddr cur_address; - size_t mapped_size; + std::size_t mapped_size; // The entire mapping process can be retried. while (true) { // Check if the memory is already mapped. { // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Iterate over the memory. cur_address = address; mapped_size = 0; - auto it = m_memory_block_manager.FindIterator(cur_address); + auto it = block_manager->FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != m_memory_block_manager.end()); + ASSERT(it != block_manager->end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -742,20 +716,20 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { { // Reserve the memory from the process resource limit. 
KScopedResourceReservation memory_reservation( - m_system.Kernel().CurrentProcess()->GetResourceLimit(), + system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, size - mapped_size); R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); // Allocate pages for the new memory. KPageGroup pg; - R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( + R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( &pg, (size - mapped_size) / PageSize, - KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); + KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); // Map the memory. { // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); size_t num_allocator_blocks = 0; @@ -765,10 +739,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { size_t checked_mapped_size = 0; cur_address = address; - auto it = m_memory_block_manager.FindIterator(cur_address); + auto it = block_manager->FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != m_memory_block_manager.end()); + ASSERT(it != block_manager->end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -808,14 +782,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { } } - // Create an update allocator. - ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager, - num_allocator_blocks); - R_TRY(allocator_result); - // Reset the current tracking address, and make sure we clean up on failure. cur_address = address; auto unmap_guard = detail::ScopeExit([&] { @@ -825,10 +791,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { // Iterate, unmapping the pages. cur_address = address; - auto it = m_memory_block_manager.FindIterator(cur_address); + auto it = block_manager->FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != m_memory_block_manager.end()); + ASSERT(it != block_manager->end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -864,10 +830,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { PAddr pg_phys_addr = pg_it->GetAddress(); size_t pg_pages = pg_it->GetNumPages(); - auto it = m_memory_block_manager.FindIterator(cur_address); + auto it = block_manager->FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != m_memory_block_manager.end()); + ASSERT(it != block_manager->end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -920,37 +886,37 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { memory_reservation.Commit(); // Increase our tracked mapped size. - m_mapped_physical_memory_size += (size - mapped_size); + mapped_physical_memory_size += (size - mapped_size); // Update the relevant memory blocks. 
- m_memory_block_manager.UpdateIfMatch( - std::addressof(allocator), address, size / PageSize, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, - KMemoryPermission::UserReadWrite, KMemoryAttribute::None); + block_manager->Update(address, size / PageSize, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryState::Normal, KMemoryPermission::UserReadWrite, + KMemoryAttribute::None); // Cancel our guard. unmap_guard.Cancel(); - R_SUCCEED(); + return ResultSuccess; } } } } -Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { +Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { // Lock the physical memory lock. - KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); + KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Calculate the last address for convenience. const VAddr last_address = address + size - 1; // Define iteration variables. VAddr cur_address = 0; - size_t mapped_size = 0; - size_t num_allocator_blocks = 0; + std::size_t mapped_size = 0; + std::size_t num_allocator_blocks = 0; // Check if the memory is mapped. { @@ -958,10 +924,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { cur_address = address; mapped_size = 0; - auto it = m_memory_block_manager.FindIterator(cur_address); + auto it = block_manager->FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != m_memory_block_manager.end()); + ASSERT(it != block_manager->end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -1056,13 +1022,6 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { } ASSERT(pg.GetNumPages() == mapped_size / PageSize); - // Create an update allocator. - ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager, num_allocator_blocks); - R_TRY(allocator_result); - // Reset the current tracking address, and make sure we clean up on failure. cur_address = address; auto remap_guard = detail::ScopeExit([&] { @@ -1071,7 +1030,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { cur_address = address; // Iterate over the memory we unmapped. - auto it = m_memory_block_manager.FindIterator(cur_address); + auto it = block_manager->FindIterator(cur_address); auto pg_it = pg.Nodes().begin(); PAddr pg_phys_addr = pg_it->GetAddress(); size_t pg_pages = pg_it->GetNumPages(); @@ -1126,10 +1085,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { }); // Iterate over the memory, unmapping as we go. - auto it = m_memory_block_manager.FindIterator(cur_address); + auto it = block_manager->FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != m_memory_block_manager.end()); + ASSERT(it != block_manager->end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -1156,159 +1115,104 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { } // Release the memory resource. 
-    m_mapped_physical_memory_size -= mapped_size;
-    auto process{m_system.Kernel().CurrentProcess()};
+    mapped_physical_memory_size -= mapped_size;
+    auto process{system.Kernel().CurrentProcess()};
     process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);

     // Update memory blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
-                                  KMemoryState::Free, KMemoryPermission::None,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None);
+    block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None,
+                          KMemoryAttribute::None);

     // TODO(bunnei): This is a workaround until the next set of changes, where we add reference
     // counting for mapped pages. Until then, we must manually close the reference to the page
     // group.
-    m_system.Kernel().MemoryManager().Close(pg);
+    system.Kernel().MemoryManager().Close(pg);

     // We succeeded.
     remap_guard.Cancel();

-    R_SUCCEED();
+    return ResultSuccess;
 }

-Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) {
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
+Result KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+    KScopedLightLock lk(general_lock);

-    // Validate that the source address's state is valid.
-    KMemoryState src_state;
-    size_t num_src_allocator_blocks;
-    R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
-                                 std::addressof(num_src_allocator_blocks), src_address, size,
-                                 KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
-                                 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
-                                 KMemoryAttribute::All, KMemoryAttribute::None));
+    KMemoryState src_state{};
+    CASCADE_CODE(CheckMemoryState(
+        &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
+        KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+        KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));

-    // Validate that the dst address's state is valid.
-    size_t num_dst_allocator_blocks;
-    R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
-                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
-                                 KMemoryPermission::None, KMemoryAttribute::None,
-                                 KMemoryAttribute::None));
+    if (IsRegionMapped(dst_addr, size)) {
+        return ResultInvalidCurrentMemory;
+    }

-    // Create an update allocator for the source.
-    Result src_allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
-                                                     m_memory_block_slab_manager,
-                                                     num_src_allocator_blocks);
-    R_TRY(src_allocator_result);
-
-    // Create an update allocator for the destination.
-    Result dst_allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
-                                                     m_memory_block_slab_manager,
-                                                     num_dst_allocator_blocks);
-    R_TRY(dst_allocator_result);
-
-    // Map the memory.
     KPageGroup page_linked_list;
-    const size_t num_pages{size / PageSize};
-    const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
-        KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
-    const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
+    const std::size_t num_pages{size / PageSize};
+
+    AddRegionToPages(src_addr, num_pages, page_linked_list);

-    AddRegionToPages(src_address, num_pages, page_linked_list);
     {
-        // Reprotect the source as kernel-read/not mapped.
auto block_guard = detail::ScopeExit([&] { - Operate(src_address, num_pages, KMemoryPermission::UserReadWrite, + Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::ChangePermissions); }); - R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions)); - R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite)); + + CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None, + OperationType::ChangePermissions)); + CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::UserReadWrite)); block_guard.Cancel(); } - // Apply the memory block updates. - m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, - new_src_perm, new_src_attr, - KMemoryBlockDisableMergeAttribute::Locked, - KMemoryBlockDisableMergeAttribute::None); - m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, - KMemoryState::Stack, KMemoryPermission::UserReadWrite, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::None, + KMemoryAttribute::Locked); + block_manager->Update(dst_addr, num_pages, KMemoryState::Stack, + KMemoryPermission::UserReadWrite); - R_SUCCEED(); + return ResultSuccess; } -Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) { - // Lock the table. - KScopedLightLock lk(m_general_lock); +Result KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { + KScopedLightLock lk(general_lock); - // Validate that the source address's state is valid. - KMemoryState src_state; - size_t num_src_allocator_blocks; - R_TRY(this->CheckMemoryState( - std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks), - src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, - KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead, - KMemoryAttribute::All, KMemoryAttribute::Locked)); + KMemoryState src_state{}; + CASCADE_CODE(CheckMemoryState( + &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias, + KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::None, + KMemoryAttribute::Mask, KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped)); - // Validate that the dst address's state is valid. - KMemoryPermission dst_perm; - size_t num_dst_allocator_blocks; - R_TRY(this->CheckMemoryState( - nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks), - dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None, - KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); - - // Create an update allocator for the source. - Result src_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), - m_memory_block_slab_manager, - num_src_allocator_blocks); - R_TRY(src_allocator_result); - - // Create an update allocator for the destination. 
- Result dst_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), - m_memory_block_slab_manager, - num_dst_allocator_blocks); - R_TRY(dst_allocator_result); + KMemoryPermission dst_perm{}; + CASCADE_CODE(CheckMemoryState(nullptr, &dst_perm, nullptr, nullptr, dst_addr, size, + KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::Mask, + KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); KPageGroup src_pages; KPageGroup dst_pages; - const size_t num_pages{size / PageSize}; + const std::size_t num_pages{size / PageSize}; - AddRegionToPages(src_address, num_pages, src_pages); - AddRegionToPages(dst_address, num_pages, dst_pages); + AddRegionToPages(src_addr, num_pages, src_pages); + AddRegionToPages(dst_addr, num_pages, dst_pages); - R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion); + if (!dst_pages.IsEqual(src_pages)) { + return ResultInvalidMemoryRegion; + } { - auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); }); + auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); }); - R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap)); - R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite, - OperationType::ChangePermissions)); + CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); + CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite, + OperationType::ChangePermissions)); block_guard.Cancel(); } - // Apply the memory block updates. - m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, - KMemoryPermission::UserReadWrite, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Locked); - m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, - KMemoryState::None, KMemoryPermission::None, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal); + block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::UserReadWrite); + block_manager->Update(dst_addr, num_pages, KMemoryState::Free); - R_SUCCEED(); + return ResultSuccess; } Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, @@ -1321,54 +1225,48 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, if (const auto result{ Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; result.IsError()) { - const size_t num_pages{(addr - cur_addr) / PageSize}; + const std::size_t num_pages{(addr - cur_addr) / PageSize}; ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap) .IsSuccess()); - R_RETURN(result); + return result; } cur_addr += node.GetNumPages() * PageSize; } - R_SUCCEED(); + return ResultSuccess; } Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state, KMemoryPermission perm) { // Check that the map is in range. - const size_t num_pages{page_linked_list.GetNumPages()}; - const size_t size{num_pages * PageSize}; + const std::size_t num_pages{page_linked_list.GetNumPages()}; + const std::size_t size{num_pages * PageSize}; R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); // Lock the table. 
- KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Check the memory state. R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager); - // Map the pages. R_TRY(MapPages(address, page_linked_list, perm)); // Update the blocks. - m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + block_manager->Update(address, num_pages, state, perm); - R_SUCCEED(); + return ResultSuccess; } -Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, - bool is_pa_valid, VAddr region_start, size_t region_num_pages, - KMemoryState state, KMemoryPermission perm) { +Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, + PAddr phys_addr, bool is_pa_valid, VAddr region_start, + std::size_t region_num_pages, KMemoryState state, + KMemoryPermission perm) { ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); // Ensure this is a valid map request. @@ -1377,7 +1275,7 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Find a random address to map at. VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, @@ -1390,11 +1288,6 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, KMemoryAttribute::None, KMemoryAttribute::None) .IsSuccess()); - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager); - // Perform mapping operation. if (is_pa_valid) { R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr)); @@ -1403,13 +1296,11 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, } // Update the blocks. - m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + block_manager->Update(addr, num_pages, state, perm); // We successfully mapped the pages. *out_addr = addr; - R_SUCCEED(); + return ResultSuccess; } Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { @@ -1421,80 +1312,60 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None, OperationType::Unmap)}; result.IsError()) { - R_RETURN(result); + return result; } cur_addr += node.GetNumPages() * PageSize; } - R_SUCCEED(); + return ResultSuccess; } -Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) { +Result KPageTable::UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state) { // Check that the unmap is in range. 
- const size_t num_pages{page_linked_list.GetNumPages()}; - const size_t size{num_pages * PageSize}; - R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + const std::size_t num_pages{page_linked_list.GetNumPages()}; + const std::size_t size{num_pages * PageSize}; + R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Check the memory state. - size_t num_allocator_blocks; - R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, - KMemoryState::All, state, KMemoryPermission::None, + R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager, num_allocator_blocks); - R_TRY(allocator_result); - // Perform the unmap. - R_TRY(UnmapPages(address, page_linked_list)); + R_TRY(UnmapPages(addr, page_linked_list)); // Update the blocks. - m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal); + block_manager->Update(addr, num_pages, state, KMemoryPermission::None); - R_SUCCEED(); + return ResultSuccess; } -Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) { +Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) { // Check that the unmap is in range. - const size_t size = num_pages * PageSize; + const std::size_t size = num_pages * PageSize; R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Check the memory state. - size_t num_allocator_blocks{}; + std::size_t num_allocator_blocks{}; R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState::All, state, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager, num_allocator_blocks); - R_TRY(allocator_result); - // Perform the unmap. R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap)); // Update the blocks. - m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal); + block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None); - R_SUCCEED(); + return ResultSuccess; } Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, @@ -1509,7 +1380,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Check if state allows us to create the group. 
     R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
@@ -1519,15 +1390,15 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
     // Create a new page group for the region.
     R_TRY(this->MakePageGroup(*out, address, num_pages));

-    R_SUCCEED();
+    return ResultSuccess;
 }

-Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size,
+Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
                                               Svc::MemoryPermission svc_perm) {
     const size_t num_pages = size / PageSize;

     // Lock the table.
-    KScopedLightLock lk(m_general_lock);
+    KScopedLightLock lk(general_lock);

     // Verify we can change the memory permission.
     KMemoryState old_state;
@@ -1564,101 +1435,105 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size,
     // Succeed if there's nothing to do.
     R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);

-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager, num_allocator_blocks);
-    R_TRY(allocator_result);
-
     // Perform mapping operation.
     const auto operation =
         was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions;
     R_TRY(Operate(addr, num_pages, new_perm, operation));

     // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None);
+    block_manager->Update(addr, num_pages, new_state, new_perm, KMemoryAttribute::None);

     // Ensure cache coherency, if we're setting pages as executable.
     if (is_x) {
-        m_system.InvalidateCpuInstructionCacheRange(addr, size);
+        system.InvalidateCpuInstructionCacheRange(addr, size);
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
-    KScopedLightLock lk(m_general_lock);
+    KScopedLightLock lk(general_lock);

-    return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
+    return block_manager->FindBlock(addr).GetMemoryInfo();
 }

 KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
     if (!Contains(addr, 1)) {
-        return {
-            .m_address = m_address_space_end,
-            .m_size = 0 - m_address_space_end,
-            .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
-            .m_device_disable_merge_left_count = 0,
-            .m_device_disable_merge_right_count = 0,
-            .m_ipc_lock_count = 0,
-            .m_device_use_count = 0,
-            .m_ipc_disable_merge_count = 0,
-            .m_permission = KMemoryPermission::None,
-            .m_attribute = KMemoryAttribute::None,
-            .m_original_permission = KMemoryPermission::None,
-            .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
-        };
+        return {address_space_end, 0 - address_space_end, KMemoryState::Inaccessible,
+                KMemoryPermission::None, KMemoryAttribute::None, KMemoryPermission::None};
     }

     return QueryInfoImpl(addr);
 }

-Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) {
+Result KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
+    KScopedLightLock lk(general_lock);
+
+    KMemoryState state{};
+    KMemoryAttribute attribute{};
+
+    R_TRY(CheckMemoryState(&state, nullptr, &attribute, nullptr, addr, size,
+                           KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
+                           KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
+                           KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+                           KMemoryAttribute::Mask, KMemoryAttribute::None,
KMemoryAttribute::None, + KMemoryAttribute::IpcAndDeviceMapped)); + + block_manager->Update(addr, size / PageSize, state, perm, attribute | KMemoryAttribute::Locked); + + return ResultSuccess; +} + +Result KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) { + KScopedLightLock lk(general_lock); + + KMemoryState state{}; + + R_TRY(CheckMemoryState(&state, nullptr, nullptr, nullptr, addr, size, + KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, + KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, + KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask, + KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped)); + + block_manager->Update(addr, size / PageSize, state, KMemoryPermission::UserReadWrite); + return ResultSuccess; +} + +Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size, + Svc::MemoryPermission svc_perm) { const size_t num_pages = size / PageSize; // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Verify we can change the memory permission. KMemoryState old_state; KMemoryPermission old_perm; - size_t num_allocator_blocks; - R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, - std::addressof(num_allocator_blocks), addr, size, - KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, - KMemoryPermission::None, KMemoryPermission::None, - KMemoryAttribute::All, KMemoryAttribute::None)); + R_TRY(this->CheckMemoryState( + std::addressof(old_state), std::addressof(old_perm), nullptr, nullptr, addr, size, + KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); // Determine new perm. const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); R_SUCCEED_IF(old_perm == new_perm); - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager, num_allocator_blocks); - R_TRY(allocator_result); - // Perform mapping operation. R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); // Update the blocks. - m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::None); + block_manager->Update(addr, num_pages, old_state, new_perm, KMemoryAttribute::None); - R_SUCCEED(); + return ResultSuccess; } -Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) { +Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) { const size_t num_pages = size / PageSize; ASSERT((static_cast(mask) | KMemoryAttribute::SetMask) == KMemoryAttribute::SetMask); // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Verify we can change the memory attribute. KMemoryState old_state; @@ -1673,12 +1548,6 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 att KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask)); - // Create an update allocator. 
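The `CheckMemoryState` calls above all follow the same masked-comparison pattern: a range qualifies only if its state, permission, and attribute bits match the expected values under the given masks. A small sketch of that predicate, using plain flag words instead of the kernel's enum classes:

```cpp
#include <cstdint>
#include <cstdio>

constexpr std::uint32_t FlagCanTransfer = 1u << 0;
constexpr std::uint32_t FlagReferenceCounted = 1u << 1;

bool StateMatches(std::uint32_t state, std::uint32_t mask, std::uint32_t expected) {
    // A block passes only if the masked bits equal the expected pattern.
    return (state & mask) == expected;
}

int main() {
    const std::uint32_t wanted = FlagCanTransfer | FlagReferenceCounted;
    const std::uint32_t block_state = FlagCanTransfer | FlagReferenceCounted;
    std::printf("%d\n", StateMatches(block_state, wanted, wanted));     // 1: usable
    std::printf("%d\n", StateMatches(FlagCanTransfer, wanted, wanted)); // 0: not ref-counted
}
```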
- Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager, num_allocator_blocks); - R_TRY(allocator_result); - // Determine the new attribute. const KMemoryAttribute new_attr = static_cast(((old_attr & static_cast(~mask)) | @@ -1688,142 +1557,123 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 att this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh); // Update the blocks. - m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm, - new_attr, KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::None); + block_manager->Update(addr, num_pages, old_state, old_perm, new_attr); - R_SUCCEED(); + return ResultSuccess; } -Result KPageTable::SetMaxHeapSize(size_t size) { +Result KPageTable::SetMaxHeapSize(std::size_t size) { // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Only process page tables are allowed to set heap size. ASSERT(!this->IsKernel()); - m_max_heap_size = size; + max_heap_size = size; - R_SUCCEED(); + return ResultSuccess; } -Result KPageTable::SetHeapSize(VAddr* out, size_t size) { +Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) { // Lock the physical memory mutex. - KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); + KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); // Try to perform a reduction in heap, instead of an extension. VAddr cur_address{}; - size_t allocation_size{}; + std::size_t allocation_size{}; { // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Validate that setting heap size is possible at all. - R_UNLESS(!m_is_kernel, ResultOutOfMemory); - R_UNLESS(size <= static_cast(m_heap_region_end - m_heap_region_start), + R_UNLESS(!is_kernel, ResultOutOfMemory); + R_UNLESS(size <= static_cast(heap_region_end - heap_region_start), ResultOutOfMemory); - R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory); + R_UNLESS(size <= max_heap_size, ResultOutOfMemory); if (size < GetHeapSize()) { // The size being requested is less than the current size, so we need to free the end of // the heap. // Validate memory state. - size_t num_allocator_blocks; + std::size_t num_allocator_blocks; R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), - m_heap_region_start + size, GetHeapSize() - size, + heap_region_start + size, GetHeapSize() - size, KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)); - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager, - num_allocator_blocks); - R_TRY(allocator_result); - // Unmap the end of the heap. const auto num_pages = (GetHeapSize() - size) / PageSize; - R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None, + R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None, OperationType::Unmap)); // Release the memory from the resource limit. - m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release( + system.Kernel().CurrentProcess()->GetResourceLimit()->Release( LimitableResource::PhysicalMemory, num_pages * PageSize); // Apply the memory block update. 
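The new attribute in `SetMemoryAttribute` is computed as `(old_attr & ~mask) | (attr & mask)`: only the bits selected by `mask` change, and everything else is preserved. A worked example with made-up flag values:

```cpp
#include <cstdint>
#include <cstdio>

std::uint32_t UpdateAttribute(std::uint32_t old_attr, std::uint32_t mask, std::uint32_t attr) {
    // Replace only the masked bits, keep the rest.
    return (old_attr & ~mask) | (attr & mask);
}

int main() {
    constexpr std::uint32_t Uncached = 1u << 3;
    constexpr std::uint32_t Locked = 1u << 4;
    // Set Uncached without disturbing Locked.
    std::printf("0x%x\n", UpdateAttribute(Locked, Uncached, Uncached)); // 0x18
    // Clear Uncached again; Locked survives.
    std::printf("0x%x\n", UpdateAttribute(Locked | Uncached, Uncached, 0)); // 0x10
}
```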
- m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, - num_pages, KMemoryState::Free, KMemoryPermission::None, - KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - size == 0 ? KMemoryBlockDisableMergeAttribute::Normal - : KMemoryBlockDisableMergeAttribute::None); + block_manager->Update(heap_region_start + size, num_pages, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None); // Update the current heap end. - m_current_heap_end = m_heap_region_start + size; + current_heap_end = heap_region_start + size; // Set the output. - *out = m_heap_region_start; - R_SUCCEED(); + *out = heap_region_start; + return ResultSuccess; } else if (size == GetHeapSize()) { // The size requested is exactly the current size. - *out = m_heap_region_start; - R_SUCCEED(); + *out = heap_region_start; + return ResultSuccess; } else { // We have to allocate memory. Determine how much to allocate and where while the table // is locked. - cur_address = m_current_heap_end; + cur_address = current_heap_end; allocation_size = size - GetHeapSize(); } } // Reserve memory for the heap extension. KScopedResourceReservation memory_reservation( - m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, + system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, allocation_size); R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); // Allocate pages for the heap extension. KPageGroup pg; - R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( + R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( &pg, allocation_size / PageSize, - KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option))); + KMemoryManager::EncodeOption(memory_pool, allocation_option))); // Clear all the newly allocated pages. for (const auto& it : pg.Nodes()) { - std::memset(m_system.DeviceMemory().GetPointer(it.GetAddress()), m_heap_fill_value, + std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value, it.GetSize()); } // Map the pages. { // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Ensure that the heap hasn't changed since we began executing. - ASSERT(cur_address == m_current_heap_end); + ASSERT(cur_address == current_heap_end); // Check the memory state. - size_t num_allocator_blocks{}; - R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end, + std::size_t num_allocator_blocks{}; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end, allocation_size, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator( - std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks); - R_TRY(allocator_result); - // Map the pages. const auto num_pages = allocation_size / PageSize; - R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup)); + R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup)); // Clear all the newly allocated pages. 
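When the requested heap size is smaller than the current one, only the tail of the heap is unmapped, and the page count falls straight out of the size difference. A quick illustration of that arithmetic:

```cpp
#include <cstddef>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000;

std::size_t PagesToUnmap(std::size_t old_heap_size, std::size_t new_heap_size) {
    // Both sizes are page-aligned in the real code; assumed here.
    return (old_heap_size - new_heap_size) / PageSize;
}

int main() {
    // Shrinking an 8 MiB heap to 6 MiB frees 512 pages of 4 KiB.
    std::printf("%zu\n", PagesToUnmap(8 * 1024 * 1024, 6 * 1024 * 1024)); // 512
}
```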
- for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) { - std::memset(m_system.Memory().GetPointer(m_current_heap_end + (cur_page * PageSize)), 0, + for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) { + std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0, PageSize); } @@ -1831,172 +1681,133 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) { memory_reservation.Commit(); // Apply the memory block update. - m_memory_block_manager.Update( - std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal, - KMemoryPermission::UserReadWrite, KMemoryAttribute::None, - m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal - : KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::None); + block_manager->Update(current_heap_end, num_pages, KMemoryState::Normal, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None); // Update the current heap end. - m_current_heap_end = m_heap_region_start + size; + current_heap_end = heap_region_start + size; // Set the output. - *out = m_heap_region_start; - R_SUCCEED(); + *out = heap_region_start; + return ResultSuccess; } } -ResultVal KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align, +ResultVal KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, bool is_map_only, VAddr region_start, - size_t region_num_pages, KMemoryState state, + std::size_t region_num_pages, KMemoryState state, KMemoryPermission perm, PAddr map_addr) { - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); + + if (!CanContain(region_start, region_num_pages * PageSize, state)) { + return ResultInvalidCurrentMemory; + } + + if (region_num_pages <= needed_num_pages) { + return ResultOutOfMemory; + } - R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state), - ResultInvalidCurrentMemory); - R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory); const VAddr addr{ AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)}; - R_UNLESS(addr, ResultOutOfMemory); - - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager); + if (!addr) { + return ResultOutOfMemory; + } if (is_map_only) { R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); } else { KPageGroup page_group; - R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( + R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( &page_group, needed_num_pages, - KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); + KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); } - // Update the blocks. - m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + block_manager->Update(addr, needed_num_pages, state, perm); return addr; } -Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, - bool is_aligned) { - // Lightly validate the range before doing anything else. 
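`AllocateAndMapMemory` relies on `AllocateVirtualMemory`/`FindFreeArea` to pick a destination, passing `IsKernel() ? 1 : 4` guard pages. A first-fit sketch of what such a search might look like; the data layout and scan strategy here are assumptions for illustration, not the real allocator:

```cpp
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

constexpr std::uint64_t PageSize = 0x1000;

std::uint64_t AlignUp(std::uint64_t v, std::uint64_t a) {
    return (v + a - 1) / a * a;
}

// Each pair is [start, end) of an existing mapping, sorted by start address.
std::uint64_t FindFreeArea(const std::vector<std::pair<std::uint64_t, std::uint64_t>>& mapped,
                           std::uint64_t region_start, std::uint64_t region_end,
                           std::uint64_t needed_pages, std::uint64_t align,
                           std::uint64_t guard_pages) {
    const std::uint64_t guard = guard_pages * PageSize;
    std::uint64_t cursor = region_start;
    auto try_fit = [&](std::uint64_t gap_end) -> std::uint64_t {
        // Leave guard pages on both sides of the candidate allocation.
        const std::uint64_t candidate = AlignUp(cursor + guard, align);
        if (candidate + needed_pages * PageSize + guard <= gap_end) {
            return candidate;
        }
        return 0; // no fit in this gap
    };
    for (const auto& [start, end] : mapped) {
        if (const auto addr = try_fit(start)) {
            return addr;
        }
        cursor = end;
    }
    return try_fit(region_end); // final gap after the last mapping
}

int main() {
    std::vector<std::pair<std::uint64_t, std::uint64_t>> mapped{{0x10000, 0x20000}};
    // Four guard pages on either side mirror the user-process case.
    std::printf("0x%llx\n",
                (unsigned long long)FindFreeArea(mapped, 0x0, 0x100000, 16, PageSize, 4));
}
```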
- const size_t num_pages = size / PageSize; - R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); +Result KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) { + KScopedLightLock lk(general_lock); - // Lock the table. - KScopedLightLock lk(m_general_lock); + KMemoryPermission perm{}; + if (const Result result{CheckMemoryState( + nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute, + KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None, + KMemoryAttribute::DeviceSharedAndUncached)}; + result.IsError()) { + return result; + } - // Check the memory state. - const auto test_state = - (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap); - size_t num_allocator_blocks; - R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state, - test_state, perm, perm, - KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, - KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); + block_manager->UpdateLock( + addr, size / PageSize, + [](KMemoryBlockManager::iterator block, KMemoryPermission permission) { + block->ShareToDevice(permission); + }, + perm); - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager, num_allocator_blocks); - R_TRY(allocator_result); - - // Update the memory blocks. - m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, - &KMemoryBlock::ShareToDevice, KMemoryPermission::None); - - R_SUCCEED(); + return ResultSuccess; } -Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { - // Lightly validate the range before doing anything else. - const size_t num_pages = size / PageSize; - R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); +Result KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) { + KScopedLightLock lk(general_lock); - // Lock the table. - KScopedLightLock lk(m_general_lock); + KMemoryPermission perm{}; + if (const Result result{CheckMemoryState( + nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute, + KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None, + KMemoryAttribute::DeviceSharedAndUncached)}; + result.IsError()) { + return result; + } - // Check the memory state. - size_t num_allocator_blocks; - R_TRY(this->CheckMemoryStateContiguous( - std::addressof(num_allocator_blocks), address, size, - KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, - KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, - KMemoryPermission::None, KMemoryPermission::None, - KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); + block_manager->UpdateLock( + addr, size / PageSize, + [](KMemoryBlockManager::iterator block, KMemoryPermission permission) { + block->UnshareToDevice(permission); + }, + perm); - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager, num_allocator_blocks); - R_TRY(allocator_result); - - // Update the memory blocks. 
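`UpdateLock` applies `ShareToDevice`/`UnshareToDevice` to every block in the range, which suggests per-block reference counting of device mappings: the shared attribute stays set while the count is non-zero. A sketch under that assumption (the field names are invented):

```cpp
#include <cassert>
#include <cstdio>

struct Block {
    unsigned device_use_count{}; // assumed counter, not the real member name
    bool device_shared{};

    void ShareToDevice() {
        ++device_use_count;
        device_shared = true;
    }
    void UnshareToDevice() {
        assert(device_use_count > 0);
        if (--device_use_count == 0) {
            device_shared = false; // last device user gone, drop the attribute
        }
    }
};

int main() {
    Block b;
    b.ShareToDevice();
    b.ShareToDevice();
    b.UnshareToDevice();
    std::printf("%u %d\n", b.device_use_count, b.device_shared); // 1 1
    b.UnshareToDevice();
    std::printf("%u %d\n", b.device_use_count, b.device_shared); // 0 0
}
```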
- const KMemoryBlockManager::MemoryBlockLockFunction lock_func = - m_enable_device_address_space_merge - ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare - : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight; - m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, - KMemoryPermission::None); - - R_SUCCEED(); + return ResultSuccess; } -Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) { - // Lightly validate the range before doing anything else. - const size_t num_pages = size / PageSize; - R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); - - // Lock the table. - KScopedLightLock lk(m_general_lock); - - // Check the memory state. - size_t num_allocator_blocks; - R_TRY(this->CheckMemoryStateContiguous( - std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap, - KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None, - KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); - - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager, num_allocator_blocks); - R_TRY(allocator_result); - - // Update the memory blocks. - m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, - &KMemoryBlock::UnshareToDevice, KMemoryPermission::None); - - R_SUCCEED(); -} - -Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) { - R_RETURN(this->LockMemoryAndOpen( +Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) { + return this->LockMemoryAndOpen( out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None, static_cast(KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite), - KMemoryAttribute::Locked)); + KMemoryAttribute::Locked); } -Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) { - R_RETURN(this->UnlockMemory( +Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) { + return this->UnlockMemory( addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, - KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg)); + KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg); +} + +Result KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) { + block_manager = std::make_unique(start, end); + + return ResultSuccess; +} + +bool KPageTable::IsRegionMapped(VAddr address, u64 size) { + return CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, + KMemoryPermission::All, KMemoryPermission::None, KMemoryAttribute::Mask, + KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped) + .IsError(); } bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { - auto start_ptr = m_system.DeviceMemory().GetPointer(addr); + auto start_ptr = system.Memory().GetPointer(addr); for (u64 offset{}; offset < size; offset += PageSize) { - if (start_ptr != m_system.DeviceMemory().GetPointer(addr + offset)) { + if (start_ptr != system.Memory().GetPointer(addr + offset)) { return false; } start_ptr += PageSize; @@ -2004,7 +1815,8 @@ 
bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { return true; } -void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list) { +void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages, + KPageGroup& page_linked_list) { VAddr addr{start}; while (addr < start + (num_pages * PageSize)) { const PAddr paddr{GetPhysicalAddr(addr)}; @@ -2014,16 +1826,16 @@ void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& pag } } -VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages, - size_t align) { - if (m_enable_aslr) { +VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, + u64 needed_num_pages, std::size_t align) { + if (is_aslr_enabled) { UNIMPLEMENTED(); } - return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, - IsKernel() ? 1 : 4); + return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, + IsKernel() ? 1 : 4); } -Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group, +Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, OperationType operation) { ASSERT(this->IsLockedByCurrentThread()); @@ -2032,11 +1844,11 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_ ASSERT(num_pages == page_group.GetNumPages()); for (const auto& node : page_group.Nodes()) { - const size_t size{node.GetNumPages() * PageSize}; + const std::size_t size{node.GetNumPages() * PageSize}; switch (operation) { case OperationType::MapGroup: - m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress()); + system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress()); break; default: ASSERT(false); @@ -2045,10 +1857,10 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_ addr += size; } - R_SUCCEED(); + return ResultSuccess; } -Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, +Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, OperationType operation, PAddr map_addr) { ASSERT(this->IsLockedByCurrentThread()); @@ -2058,12 +1870,12 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, switch (operation) { case OperationType::Unmap: - m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize); + system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize); break; case OperationType::Map: { ASSERT(map_addr); ASSERT(Common::IsAligned(map_addr, PageSize)); - m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); + system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr); break; } case OperationType::ChangePermissions: @@ -2072,25 +1884,25 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, default: ASSERT(false); } - R_SUCCEED(); + return ResultSuccess; } VAddr KPageTable::GetRegionAddress(KMemoryState state) const { switch (state) { case KMemoryState::Free: case KMemoryState::Kernel: - return m_address_space_start; + return address_space_start; case KMemoryState::Normal: - return m_heap_region_start; + return heap_region_start; case KMemoryState::Ipc: case KMemoryState::NonSecureIpc: case KMemoryState::NonDeviceIpc: - return m_alias_region_start; + return alias_region_start; case KMemoryState::Stack: - return 
m_stack_region_start; + return stack_region_start; case KMemoryState::Static: case KMemoryState::ThreadLocal: - return m_kernel_map_region_start; + return kernel_map_region_start; case KMemoryState::Io: case KMemoryState::Shared: case KMemoryState::AliasCode: @@ -2101,31 +1913,31 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const { case KMemoryState::GeneratedCode: case KMemoryState::CodeOut: case KMemoryState::Coverage: - return m_alias_code_region_start; + return alias_code_region_start; case KMemoryState::Code: case KMemoryState::CodeData: - return m_code_region_start; + return code_region_start; default: UNREACHABLE(); } } -size_t KPageTable::GetRegionSize(KMemoryState state) const { +std::size_t KPageTable::GetRegionSize(KMemoryState state) const { switch (state) { case KMemoryState::Free: case KMemoryState::Kernel: - return m_address_space_end - m_address_space_start; + return address_space_end - address_space_start; case KMemoryState::Normal: - return m_heap_region_end - m_heap_region_start; + return heap_region_end - heap_region_start; case KMemoryState::Ipc: case KMemoryState::NonSecureIpc: case KMemoryState::NonDeviceIpc: - return m_alias_region_end - m_alias_region_start; + return alias_region_end - alias_region_start; case KMemoryState::Stack: - return m_stack_region_end - m_stack_region_start; + return stack_region_end - stack_region_start; case KMemoryState::Static: case KMemoryState::ThreadLocal: - return m_kernel_map_region_end - m_kernel_map_region_start; + return kernel_map_region_end - kernel_map_region_start; case KMemoryState::Io: case KMemoryState::Shared: case KMemoryState::AliasCode: @@ -2136,16 +1948,16 @@ size_t KPageTable::GetRegionSize(KMemoryState state) const { case KMemoryState::GeneratedCode: case KMemoryState::CodeOut: case KMemoryState::Coverage: - return m_alias_code_region_end - m_alias_code_region_start; + return alias_code_region_end - alias_code_region_start; case KMemoryState::Code: case KMemoryState::CodeData: - return m_code_region_end - m_code_region_start; + return code_region_end - code_region_start; default: UNREACHABLE(); } } -bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const { +bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const { const VAddr end = addr + size; const VAddr last = end - 1; @@ -2154,10 +1966,10 @@ bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const { const bool is_in_region = region_start <= addr && addr < end && last <= region_start + region_size - 1; - const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr || - m_heap_region_start == m_heap_region_end); - const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || - m_alias_region_start == m_alias_region_end); + const bool is_in_heap = !(end <= heap_region_start || heap_region_end <= addr || + heap_region_start == heap_region_end); + const bool is_in_alias = !(end <= alias_region_start || alias_region_end <= addr || + alias_region_start == alias_region_end); switch (state) { case KMemoryState::Free: case KMemoryState::Kernel: @@ -2196,23 +2008,23 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_ KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const { // Validate the states match expectation. 
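The `is_in_heap`/`is_in_alias` tests in `CanContain` above are plain interval-overlap checks, with the twist that an empty region (start == end) never overlaps anything. Distilled:

```cpp
#include <cstdint>
#include <cstdio>

bool Overlaps(std::uint64_t addr, std::uint64_t end, std::uint64_t region_start,
              std::uint64_t region_end) {
    // Overlap unless the range lies entirely before or after the region,
    // or the region is empty.
    return !(end <= region_start || region_end <= addr || region_start == region_end);
}

int main() {
    // A mapping at [0x5000, 0x7000) against a heap region [0x6000, 0x9000).
    std::printf("%d\n", Overlaps(0x5000, 0x7000, 0x6000, 0x9000)); // 1
    // The same mapping against an empty alias region.
    std::printf("%d\n", Overlaps(0x5000, 0x7000, 0x8000, 0x8000)); // 0
}
```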
- R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory); - R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory); - R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory); + R_UNLESS((info.state & state_mask) == state, ResultInvalidCurrentMemory); + R_UNLESS((info.perm & perm_mask) == perm, ResultInvalidCurrentMemory); + R_UNLESS((info.attribute & attr_mask) == attr, ResultInvalidCurrentMemory); - R_SUCCEED(); + return ResultSuccess; } -Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size, - KMemoryState state_mask, KMemoryState state, - KMemoryPermission perm_mask, KMemoryPermission perm, - KMemoryAttribute attr_mask, +Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, + std::size_t size, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, + KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const { ASSERT(this->IsLockedByCurrentThread()); // Get information about the first block. const VAddr last_addr = addr + size - 1; - KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); + KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr); KMemoryInfo info = it->GetMemoryInfo(); // If the start address isn't aligned, we need a block. @@ -2230,7 +2042,7 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr a // Advance our iterator. it++; - ASSERT(it != m_memory_block_manager.cend()); + ASSERT(it != block_manager->cend()); info = it->GetMemoryInfo(); } @@ -2242,12 +2054,12 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr a *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; } - R_SUCCEED(); + return ResultSuccess; } Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, - KMemoryAttribute* out_attr, size_t* out_blocks_needed, - VAddr addr, size_t size, KMemoryState state_mask, + KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, + VAddr addr, std::size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { @@ -2255,7 +2067,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* // Get information about the first block. const VAddr last_addr = addr + size - 1; - KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); + KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr); KMemoryInfo info = it->GetMemoryInfo(); // If the start address isn't aligned, we need a block. @@ -2263,14 +2075,14 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0; // Validate all blocks in the range have correct state. - const KMemoryState first_state = info.m_state; - const KMemoryPermission first_perm = info.m_permission; - const KMemoryAttribute first_attr = info.m_attribute; + const KMemoryState first_state = info.state; + const KMemoryPermission first_perm = info.perm; + const KMemoryAttribute first_attr = info.attribute; while (true) { // Validate the current block. 
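An unaligned start (and, symmetrically, an unaligned end) can split an existing block in two, which is why each side may add one to the number of blocks the update needs. A sketch of that accounting; the end-side test is an assumption mirroring the start-side one shown above:

```cpp
#include <cstdint>
#include <cstdio>

constexpr std::uint64_t PageSize = 0x1000;

std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) { return v / a * a; }

std::uint64_t BlocksForAlignment(std::uint64_t addr, std::uint64_t block_start,
                                 std::uint64_t last_addr, std::uint64_t last_block_end) {
    // Start splits its block unless it lands exactly on the block boundary.
    const std::uint64_t start_blocks = (AlignDown(addr, PageSize) != block_start) ? 1 : 0;
    // End-side check, assumed symmetric to the start-side one.
    const std::uint64_t end_blocks =
        (AlignDown(last_addr, PageSize) + PageSize != last_block_end) ? 1 : 0;
    return start_blocks + end_blocks;
}

int main() {
    // Range starts mid-block and ends mid-block: both sides split.
    std::printf("%llu\n",
                (unsigned long long)BlocksForAlignment(0x3000, 0x0, 0x5FFF, 0x8000)); // 2
}
```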
- R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory); - R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory); - R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), + R_UNLESS(info.state == first_state, ResultInvalidCurrentMemory); + R_UNLESS(info.perm == first_perm, ResultInvalidCurrentMemory); + R_UNLESS((info.attribute | ignore_attr) == (first_attr | ignore_attr), ResultInvalidCurrentMemory); // Validate against the provided masks. @@ -2283,7 +2095,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* // Advance our iterator. it++; - ASSERT(it != m_memory_block_manager.cend()); + ASSERT(it != block_manager->cend()); info = it->GetMemoryInfo(); } @@ -2304,7 +2116,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* if (out_blocks_needed != nullptr) { *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; } - R_SUCCEED(); + return ResultSuccess; } Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, @@ -2322,7 +2134,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Check that the output page group is empty, if it exists. if (out_pg) { @@ -2350,12 +2162,6 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr R_TRY(this->MakePageGroup(*out_pg, addr, num_pages)); } - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager, num_allocator_blocks); - R_TRY(allocator_result); - // Decide on new perm and attr. new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; KMemoryAttribute new_attr = static_cast(old_attr | lock_attr); @@ -2366,11 +2172,9 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr } // Apply the memory block updates. - m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, - new_attr, KMemoryBlockDisableMergeAttribute::Locked, - KMemoryBlockDisableMergeAttribute::None); + block_manager->Update(addr, num_pages, old_state, new_perm, new_attr); - R_SUCCEED(); + return ResultSuccess; } Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, @@ -2387,7 +2191,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(m_general_lock); + KScopedLightLock lk(general_lock); // Check the state. KMemoryState old_state{}; @@ -2409,23 +2213,15 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; KMemoryAttribute new_attr = static_cast(old_attr & ~lock_attr); - // Create an update allocator. - Result allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - m_memory_block_slab_manager, num_allocator_blocks); - R_TRY(allocator_result); - // Update permission, if we need to. if (new_perm != old_perm) { R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); } // Apply the memory block updates. 
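`LockMemoryAndOpen` ORs the lock attribute into the block and `UnlockMemory` masks it back out, with `new_perm == None` meaning "keep the old permission". A round-trip sketch with simplified flag values:

```cpp
#include <cstdint>
#include <cstdio>

constexpr std::uint32_t AttrLocked = 1u << 4;
constexpr std::uint32_t PermNone = 0;
constexpr std::uint32_t PermUserReadWrite = 3;

struct BlockState {
    std::uint32_t perm;
    std::uint32_t attr;
};

BlockState Lock(BlockState b, std::uint32_t new_perm) {
    // PermNone means "keep the old permission", as in the diff.
    return {new_perm != PermNone ? new_perm : b.perm, b.attr | AttrLocked};
}

BlockState Unlock(BlockState b, std::uint32_t new_perm) {
    return {new_perm != PermNone ? new_perm : b.perm, b.attr & ~AttrLocked};
}

int main() {
    BlockState b{PermUserReadWrite, 0};
    b = Lock(b, PermNone);
    std::printf("perm=%u attr=0x%x\n", b.perm, b.attr); // perm=3 attr=0x10
    b = Unlock(b, PermUserReadWrite);
    std::printf("perm=%u attr=0x%x\n", b.perm, b.attr); // perm=3 attr=0x0
}
```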
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, - new_attr, KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Locked); + block_manager->Update(addr, num_pages, old_state, new_perm, new_attr); - R_SUCCEED(); + return ResultSuccess; } } // namespace Kernel diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index c6aeacd96..25774f232 100755 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h @@ -9,10 +9,8 @@ #include "common/common_types.h" #include "common/page_table.h" #include "core/file_sys/program_metadata.h" -#include "core/hle/kernel/k_dynamic_resource_manager.h" #include "core/hle/kernel/k_light_lock.h" #include "core/hle/kernel/k_memory_block.h" -#include "core/hle/kernel/k_memory_block_manager.h" #include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_memory_manager.h" #include "core/hle/result.h" @@ -36,66 +34,58 @@ public: ~KPageTable(); Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, - VAddr code_addr, size_t code_size, - KMemoryBlockSlabManager* mem_block_slab_manager, - KMemoryManager::Pool pool); - - void Finalize(); - - Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state, + VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool); + Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state, KMemoryPermission perm); - Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size); - Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size, + Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size); + Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size, ICacheInvalidationStrategy icache_invalidation_strategy); - Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, + Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, VAddr src_addr); - Result MapPhysicalMemory(VAddr addr, size_t size); - Result UnmapPhysicalMemory(VAddr addr, size_t size); - Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size); - Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size); + Result MapPhysicalMemory(VAddr addr, std::size_t size); + Result UnmapPhysicalMemory(VAddr addr, std::size_t size); + Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); + Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state, KMemoryPermission perm); - Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, + Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr, KMemoryState state, KMemoryPermission perm) { - R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, - this->GetRegionAddress(state), - this->GetRegionSize(state) / PageSize, state, perm)); + return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, + this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, + state, perm); } Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state); - Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state); - Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm); + Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state); + 
Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm); KMemoryInfo QueryInfo(VAddr addr); - Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm); - Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr); - Result SetMaxHeapSize(size_t size); - Result SetHeapSize(VAddr* out, size_t size); - ResultVal AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only, - VAddr region_start, size_t region_num_pages, - KMemoryState state, KMemoryPermission perm, - PAddr map_addr = 0); - - Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, - bool is_aligned); - Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size); - - Result UnlockForDeviceAddressSpace(VAddr addr, size_t size); - - Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size); - Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg); + Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm); + Result ResetTransferMemory(VAddr addr, std::size_t size); + Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm); + Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr); + Result SetMaxHeapSize(std::size_t size); + Result SetHeapSize(VAddr* out, std::size_t size); + ResultVal AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, + bool is_map_only, VAddr region_start, + std::size_t region_num_pages, KMemoryState state, + KMemoryPermission perm, PAddr map_addr = 0); + Result LockForDeviceAddressSpace(VAddr addr, std::size_t size); + Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size); + Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size); + Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg); Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr); Common::PageTable& PageTableImpl() { - return *m_page_table_impl; + return page_table_impl; } const Common::PageTable& PageTableImpl() const { - return *m_page_table_impl; + return page_table_impl; } - bool CanContain(VAddr addr, size_t size, KMemoryState state) const; + bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const; private: enum class OperationType : u32 { @@ -106,65 +96,67 @@ private: ChangePermissionsAndRefresh, }; - static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = - KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; + static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask | + KMemoryAttribute::IpcLocked | + KMemoryAttribute::DeviceShared; + Result InitializeMemoryLayout(VAddr start, VAddr end); Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm); - Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, - bool is_pa_valid, VAddr region_start, size_t region_num_pages, + Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr, + bool is_pa_valid, VAddr region_start, std::size_t region_num_pages, KMemoryState state, KMemoryPermission perm); Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list); + bool IsRegionMapped(VAddr address, u64 size); bool IsRegionContiguous(VAddr addr, u64 size) const; - void 
AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list); + void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list); KMemoryInfo QueryInfoImpl(VAddr addr); - VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages, - size_t align); - Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group, + VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages, + std::size_t align); + Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, OperationType operation); - Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, - PAddr map_addr = 0); + Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, + OperationType operation, PAddr map_addr = 0); VAddr GetRegionAddress(KMemoryState state) const; - size_t GetRegionSize(KMemoryState state) const; + std::size_t GetRegionSize(KMemoryState state) const; - VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, - size_t alignment, size_t offset, size_t guard_pages); + VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, + std::size_t alignment, std::size_t offset, std::size_t guard_pages); - Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size, + Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const; - Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask, + Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const { - R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, - perm, attr_mask, attr)); + return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, + perm, attr_mask, attr); } Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const; Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, - KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr, - size_t size, KMemoryState state_mask, KMemoryState state, + KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr, + std::size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; - Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size, + Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { - R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, - state_mask, state, perm_mask, perm, attr_mask, attr, - ignore_attr)); + return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, 
+ state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); } - Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, - KMemoryPermission perm_mask, KMemoryPermission perm, + Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { - R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, - attr_mask, attr, ignore_attr)); + return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, + attr_mask, attr, ignore_attr); } Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, @@ -182,13 +174,13 @@ private: bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages); bool IsLockedByCurrentThread() const { - return m_general_lock.IsLockedByCurrentThread(); + return general_lock.IsLockedByCurrentThread(); } bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) { ASSERT(this->IsLockedByCurrentThread()); - return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr); + return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr); } bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const { @@ -199,93 +191,95 @@ private: return *out != 0; } - mutable KLightLock m_general_lock; - mutable KLightLock m_map_physical_memory_lock; + mutable KLightLock general_lock; + mutable KLightLock map_physical_memory_lock; + + std::unique_ptr block_manager; public: constexpr VAddr GetAddressSpaceStart() const { - return m_address_space_start; + return address_space_start; } constexpr VAddr GetAddressSpaceEnd() const { - return m_address_space_end; + return address_space_end; } - constexpr size_t GetAddressSpaceSize() const { - return m_address_space_end - m_address_space_start; + constexpr std::size_t GetAddressSpaceSize() const { + return address_space_end - address_space_start; } constexpr VAddr GetHeapRegionStart() const { - return m_heap_region_start; + return heap_region_start; } constexpr VAddr GetHeapRegionEnd() const { - return m_heap_region_end; + return heap_region_end; } - constexpr size_t GetHeapRegionSize() const { - return m_heap_region_end - m_heap_region_start; + constexpr std::size_t GetHeapRegionSize() const { + return heap_region_end - heap_region_start; } constexpr VAddr GetAliasRegionStart() const { - return m_alias_region_start; + return alias_region_start; } constexpr VAddr GetAliasRegionEnd() const { - return m_alias_region_end; + return alias_region_end; } - constexpr size_t GetAliasRegionSize() const { - return m_alias_region_end - m_alias_region_start; + constexpr std::size_t GetAliasRegionSize() const { + return alias_region_end - alias_region_start; } constexpr VAddr GetStackRegionStart() const { - return m_stack_region_start; + return stack_region_start; } constexpr VAddr GetStackRegionEnd() const { - return m_stack_region_end; + return stack_region_end; } - constexpr size_t GetStackRegionSize() const { - return m_stack_region_end - m_stack_region_start; + constexpr std::size_t GetStackRegionSize() const { + return stack_region_end - stack_region_start; } constexpr VAddr GetKernelMapRegionStart() const { - return m_kernel_map_region_start; + return kernel_map_region_start; } constexpr VAddr GetKernelMapRegionEnd() const { - return m_kernel_map_region_end; + return 
kernel_map_region_end; } constexpr VAddr GetCodeRegionStart() const { - return m_code_region_start; + return code_region_start; } constexpr VAddr GetCodeRegionEnd() const { - return m_code_region_end; + return code_region_end; } constexpr VAddr GetAliasCodeRegionStart() const { - return m_alias_code_region_start; + return alias_code_region_start; } constexpr VAddr GetAliasCodeRegionSize() const { - return m_alias_code_region_end - m_alias_code_region_start; + return alias_code_region_end - alias_code_region_start; } - size_t GetNormalMemorySize() { - KScopedLightLock lk(m_general_lock); - return GetHeapSize() + m_mapped_physical_memory_size; + std::size_t GetNormalMemorySize() { + KScopedLightLock lk(general_lock); + return GetHeapSize() + mapped_physical_memory_size; } - constexpr size_t GetAddressSpaceWidth() const { - return m_address_space_width; + constexpr std::size_t GetAddressSpaceWidth() const { + return address_space_width; } - constexpr size_t GetHeapSize() const { - return m_current_heap_end - m_heap_region_start; + constexpr std::size_t GetHeapSize() const { + return current_heap_end - heap_region_start; } - constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const { - return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1; + constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const { + return address_space_start <= address && address + size - 1 <= address_space_end - 1; } - constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const { - return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1; + constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const { + return alias_region_start > address || address + size - 1 > alias_region_end - 1; } - constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const { - return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1; + constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const { + return stack_region_start > address || address + size - 1 > stack_region_end - 1; } - constexpr bool IsInvalidRegion(VAddr address, size_t size) const { + constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const { return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1; } - constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const { - return address + size > m_heap_region_start && m_heap_region_end > address; + constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const { + return address + size > heap_region_start && heap_region_end > address; } - constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const { - return address + size > m_alias_region_start && m_alias_region_end > address; + constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const { + return address + size > alias_region_start && alias_region_end > address; } - constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const { + constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const { if (IsInvalidRegion(address, size)) { return true; } @@ -297,78 +291,73 @@ public: } return {}; } - constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const { + constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const { return !IsOutsideASLRRegion(address, size); } - constexpr size_t GetNumGuardPages() const { + constexpr std::size_t GetNumGuardPages() const { return IsKernel() ? 
1 : 4; } PAddr GetPhysicalAddr(VAddr addr) const { - const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits]; + const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits]; ASSERT(backing_addr); return backing_addr + addr; } constexpr bool Contains(VAddr addr) const { - return m_address_space_start <= addr && addr <= m_address_space_end - 1; + return address_space_start <= addr && addr <= address_space_end - 1; } - constexpr bool Contains(VAddr addr, size_t size) const { - return m_address_space_start <= addr && addr < addr + size && - addr + size - 1 <= m_address_space_end - 1; + constexpr bool Contains(VAddr addr, std::size_t size) const { + return address_space_start <= addr && addr < addr + size && + addr + size - 1 <= address_space_end - 1; } private: constexpr bool IsKernel() const { - return m_is_kernel; + return is_kernel; } constexpr bool IsAslrEnabled() const { - return m_enable_aslr; + return is_aslr_enabled; } - constexpr bool ContainsPages(VAddr addr, size_t num_pages) const { - return (m_address_space_start <= addr) && - (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && - (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); + constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const { + return (address_space_start <= addr) && + (num_pages <= (address_space_end - address_space_start) / PageSize) && + (addr + num_pages * PageSize - 1 <= address_space_end - 1); } private: - VAddr m_address_space_start{}; - VAddr m_address_space_end{}; - VAddr m_heap_region_start{}; - VAddr m_heap_region_end{}; - VAddr m_current_heap_end{}; - VAddr m_alias_region_start{}; - VAddr m_alias_region_end{}; - VAddr m_stack_region_start{}; - VAddr m_stack_region_end{}; - VAddr m_kernel_map_region_start{}; - VAddr m_kernel_map_region_end{}; - VAddr m_code_region_start{}; - VAddr m_code_region_end{}; - VAddr m_alias_code_region_start{}; - VAddr m_alias_code_region_end{}; + VAddr address_space_start{}; + VAddr address_space_end{}; + VAddr heap_region_start{}; + VAddr heap_region_end{}; + VAddr current_heap_end{}; + VAddr alias_region_start{}; + VAddr alias_region_end{}; + VAddr stack_region_start{}; + VAddr stack_region_end{}; + VAddr kernel_map_region_start{}; + VAddr kernel_map_region_end{}; + VAddr code_region_start{}; + VAddr code_region_end{}; + VAddr alias_code_region_start{}; + VAddr alias_code_region_end{}; - size_t m_mapped_physical_memory_size{}; - size_t m_max_heap_size{}; - size_t m_max_physical_memory_size{}; - size_t m_address_space_width{}; + std::size_t mapped_physical_memory_size{}; + std::size_t max_heap_size{}; + std::size_t max_physical_memory_size{}; + std::size_t address_space_width{}; - KMemoryBlockManager m_memory_block_manager; + bool is_kernel{}; + bool is_aslr_enabled{}; - bool m_is_kernel{}; - bool m_enable_aslr{}; - bool m_enable_device_address_space_merge{}; + u32 heap_fill_value{}; + const KMemoryRegion* cached_physical_heap_region{}; - KMemoryBlockSlabManager* m_memory_block_slab_manager{}; + KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application}; + KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront}; - u32 m_heap_fill_value{}; - const KMemoryRegion* m_cached_physical_heap_region{}; + Common::PageTable page_table_impl; - KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; - KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront}; - - std::unique_ptr m_page_table_impl; - - Core::System& m_system; + 
Core::System& system; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 8c3495e5a..d3e99665f 100755 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -72,8 +72,7 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process->name = std::move(process_name); process->resource_limit = res_limit; - process->system_resource_address = 0; - process->state = State::Created; + process->status = ProcessStatus::Created; process->program_id = 0; process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() : kernel.CreateNewUserProcessID(); @@ -93,12 +92,11 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process->exception_thread = nullptr; process->is_suspended = false; process->schedule_count = 0; - process->is_handle_table_initialized = false; // Open a reference to the resource limit. process->resource_limit->Open(); - R_SUCCEED(); + return ResultSuccess; } void KProcess::DoWorkerTaskImpl() { @@ -123,9 +121,9 @@ void KProcess::DecrementRunningThreadCount() { } } -u64 KProcess::GetTotalPhysicalMemoryAvailable() { +u64 KProcess::GetTotalPhysicalMemoryAvailable() const { const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) + - page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size + + page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size + main_thread_stack_size}; if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); capacity != pool_size) { @@ -137,16 +135,16 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() { return memory_usage_capacity; } -u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() { +u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const { return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize(); } -u64 KProcess::GetTotalPhysicalMemoryUsed() { - return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() + +u64 KProcess::GetTotalPhysicalMemoryUsed() const { + return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() + GetSystemResourceSize(); } -u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() { +u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); } @@ -246,7 +244,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad shmem->Open(); shemen_info->Open(); - R_SUCCEED(); + return ResultSuccess; } void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, @@ -291,12 +289,12 @@ Result KProcess::Reset() { KScopedSchedulerLock sl{kernel}; // Validate that we're in a state that we can reset. - R_UNLESS(state != State::Terminated, ResultInvalidState); + R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState); R_UNLESS(is_signaled, ResultInvalidState); // Clear signaled. is_signaled = false; - R_SUCCEED(); + return ResultSuccess; } Result KProcess::SetActivity(ProcessActivity activity) { @@ -306,13 +304,15 @@ Result KProcess::SetActivity(ProcessActivity activity) { KScopedSchedulerLock sl{kernel}; // Validate our state. 
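The `SetActivity` path below rejects pausing an already-suspended process and resuming one that is not suspended; those are exactly the two checks the diff rewrites from `R_UNLESS` into explicit early returns. A minimal sketch of that state machine:

```cpp
#include <cstdio>

enum class Activity { Paused, Runnable };

struct Process {
    bool is_suspended{};

    bool SetActivity(Activity activity) {
        if (activity == Activity::Paused) {
            if (is_suspended) {
                return false; // ResultInvalidState in the real code
            }
            is_suspended = true;
        } else {
            if (!is_suspended) {
                return false;
            }
            is_suspended = false;
        }
        return true;
    }
};

int main() {
    Process p;
    std::printf("%d\n", p.SetActivity(Activity::Paused));   // 1
    std::printf("%d\n", p.SetActivity(Activity::Paused));   // 0: already suspended
    std::printf("%d\n", p.SetActivity(Activity::Runnable)); // 1
}
```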
-    R_UNLESS(state != State::Terminating, ResultInvalidState);
-    R_UNLESS(state != State::Terminated, ResultInvalidState);
+    R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState);
+    R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);

     // Either pause or resume.
     if (activity == ProcessActivity::Paused) {
         // Verify that we're not suspended.
-        R_UNLESS(!is_suspended, ResultInvalidState);
+        if (is_suspended) {
+            return ResultInvalidState;
+        }

         // Suspend all threads.
         for (auto* thread : GetThreadList()) {
@@ -325,7 +325,9 @@ Result KProcess::SetActivity(ProcessActivity activity) {
         ASSERT(activity == ProcessActivity::Runnable);

         // Verify that we're suspended.
-        R_UNLESS(is_suspended, ResultInvalidState);
+        if (!is_suspended) {
+            return ResultInvalidState;
+        }

         // Resume all threads.
         for (auto* thread : GetThreadList()) {
@@ -336,7 +338,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
         SetSuspended(false);
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
@@ -346,38 +348,35 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     system_resource_size = metadata.GetSystemResourceSize();
     image_size = code_size;

-    // We currently do not support process-specific system resource
-    UNIMPLEMENTED_IF(system_resource_size != 0);
-
     KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
                                                   code_size + system_resource_size);
     if (!memory_reservation.Succeeded()) {
         LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
                   code_size + system_resource_size);
-        R_RETURN(ResultLimitReached);
+        return ResultLimitReached;
     }

     // Initialize process address space
-    if (const Result result{page_table.InitializeForProcess(
-            metadata.GetAddressSpaceType(), false, 0x8000000, code_size,
-            &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)};
+    if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false,
+                                                             0x8000000, code_size,
+                                                             KMemoryManager::Pool::Application)};
         result.IsError()) {
-        R_RETURN(result);
+        return result;
     }

     // Map process code region
-    if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(),
-                                                      code_size / PageSize, KMemoryState::Code,
-                                                      KMemoryPermission::None)};
+    if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
+                                                       code_size / PageSize, KMemoryState::Code,
+                                                       KMemoryPermission::None)};
         result.IsError()) {
-        R_RETURN(result);
+        return result;
     }

     // Initialize process capabilities
     const auto& caps{metadata.GetKernelCapabilities()};
     if (const Result result{
-            capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)};
+            capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
         result.IsError()) {
-        R_RETURN(result);
+        return result;
     }

     // Set memory usage capacity
@@ -385,12 +384,12 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     case FileSys::ProgramAddressSpaceType::Is32Bit:
     case FileSys::ProgramAddressSpaceType::Is36Bit:
     case FileSys::ProgramAddressSpaceType::Is39Bit:
-        memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart();
+        memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart();
         break;

     case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
-        memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() +
-                                page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart();
+        memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() +
+                                page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart();
         break;

     default:
@@ -398,10 +397,10 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     }

     // Create TLS region
-    R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address)));
+    R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address)));
     memory_reservation.Commit();

-    R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize()));
+    return handle_table.Initialize(capabilities.GetHandleTableSize());
 }

 void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
@@ -410,15 +409,15 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
     resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);

     const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
-    ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());
+    ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError());

-    ChangeState(State::Running);
+    ChangeStatus(ProcessStatus::Running);

     SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
 }

 void KProcess::PrepareForTermination() {
-    ChangeState(State::Terminating);
+    ChangeStatus(ProcessStatus::Exiting);

     const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
         for (auto* thread : in_thread_list) {
@@ -438,15 +437,15 @@ void KProcess::PrepareForTermination() {

     stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());

-    this->DeleteThreadLocalRegion(plr_address);
-    plr_address = 0;
+    this->DeleteThreadLocalRegion(tls_region_address);
+    tls_region_address = 0;

     if (resource_limit) {
         resource_limit->Release(LimitableResource::PhysicalMemory,
                                 main_thread_stack_size + image_size);
     }

-    ChangeState(State::Terminated);
+    ChangeStatus(ProcessStatus::Exited);
 }

 void KProcess::Finalize() {
@@ -475,7 +474,7 @@ void KProcess::Finalize() {
     }

     // Finalize the page table.
-    page_table.Finalize();
+    page_table.reset();

     // Perform inherited finalization.
     KAutoObjectWithSlabHeapAndContainer::Finalize();
@@ -500,7 +499,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
         }

         *out = tlr;
-        R_SUCCEED();
+        return ResultSuccess;
     }
 }
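In the CreateThreadLocalRegion hunk continued just below, tlp_guard.Cancel() defuses a rollback that would otherwise free the freshly allocated thread-local page on a failure path. A hedged sketch of that cancellable scope-guard idiom (illustrative type, not yuzu's actual SCOPE_EXIT helper):

    #include <utility>

    // Illustrative cancellable scope guard: runs the rollback on scope exit
    // unless the success path calls Cancel() first.
    template <typename F>
    class ScopeGuard {
    public:
        explicit ScopeGuard(F func) : func_{std::move(func)} {}
        ~ScopeGuard() {
            if (active_) {
                func_();
            }
        }
        void Cancel() {
            active_ = false; // success: keep the resource alive
        }
        ScopeGuard(const ScopeGuard&) = delete;
        ScopeGuard& operator=(const ScopeGuard&) = delete;

    private:
        F func_;
        bool active_{true};
    };

Every early return frees the page automatically; only the single success path, after Cancel(), keeps it.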
@@ -529,7 +528,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {

     // We succeeded!
     tlp_guard.Cancel();
     *out = tlr;

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
@@ -577,7 +576,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
         KThreadLocalPage::Free(kernel, page_to_free);
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
@@ -629,7 +628,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,

 void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
     const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
                                       Svc::MemoryPermission permission) {
-        page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
+        page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
     };

     kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
@@ -646,18 +645,19 @@ bool KProcess::IsSignaled() const {
 }

 KProcess::KProcess(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
+    : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>(
+                                                        kernel_.System())},
       handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
       state_lock{kernel_}, list_lock{kernel_} {}

 KProcess::~KProcess() = default;

-void KProcess::ChangeState(State new_state) {
-    if (state == new_state) {
+void KProcess::ChangeStatus(ProcessStatus new_status) {
+    if (status == new_status) {
         return;
     }

-    state = new_state;
+    status = new_status;
     is_signaled = true;
     NotifyAvailable();
 }
@@ -668,17 +668,17 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
     // The kernel always ensures that the given stack size is page aligned.
     main_thread_stack_size = Common::AlignUp(stack_size, PageSize);

-    const VAddr start{page_table.GetStackRegionStart()};
-    const std::size_t size{page_table.GetStackRegionEnd() - start};
+    const VAddr start{page_table->GetStackRegionStart()};
+    const std::size_t size{page_table->GetStackRegionEnd() - start};

     CASCADE_RESULT(main_thread_stack_top,
-                   page_table.AllocateAndMapMemory(
+                   page_table->AllocateAndMapMemory(
                        main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
                        KMemoryState::Stack, KMemoryPermission::UserReadWrite));

     main_thread_stack_top += main_thread_stack_size;

-    R_SUCCEED();
+    return ResultSuccess;
 }

 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 788faec1d..d56d73bab 100755
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -13,7 +13,6 @@
 #include "core/hle/kernel/k_auto_object.h"
 #include "core/hle/kernel/k_condition_variable.h"
 #include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread_local_page.h"
 #include "core/hle/kernel/k_worker_task.h"
@@ -32,6 +31,7 @@ class ProgramMetadata;
 namespace Kernel {

 class KernelCore;
+class KPageTable;
 class KResourceLimit;
 class KThread;
 class KSharedMemoryInfo;
@@ -45,6 +45,24 @@ enum class MemoryRegion : u16 {
     BASE = 3,
 };

+/**
+ * Indicates the status of a Process instance.
+ *
+ * @note These match the values as used by kernel,
+ * so new entries should only be added if RE
+ * shows that a new value has been introduced.
+ */
+enum class ProcessStatus {
+    Created,
+    CreatedWithDebuggerAttached,
+    Running,
+    WaitingForDebuggerToAttach,
+    DebuggerAttached,
+    Exiting,
+    Exited,
+    DebugBreak,
+};
+
 enum class ProcessActivity : u32 {
     Runnable,
     Paused,
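Because the comment above ties ProcessStatus to the kernel's reverse-engineered numeric values, the enumerators' implicit values (0 through 7, in declaration order) are load-bearing. A hypothetical compile-time check, not present in the diff, would pin them down:

    // Sketch only: guards the RE'd numeric values of ProcessStatus.
    static_assert(static_cast<int>(ProcessStatus::Created) == 0);
    static_assert(static_cast<int>(ProcessStatus::Running) == 2);
    static_assert(static_cast<int>(ProcessStatus::Exiting) == 5);
    static_assert(static_cast<int>(ProcessStatus::DebugBreak) == 7);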
@@ -71,17 +89,6 @@ public:
     explicit KProcess(KernelCore& kernel_);
     ~KProcess() override;

-    enum class State {
-        Created = Svc::ProcessState_Created,
-        CreatedAttached = Svc::ProcessState_CreatedAttached,
-        Running = Svc::ProcessState_Running,
-        Crashed = Svc::ProcessState_Crashed,
-        RunningAttached = Svc::ProcessState_RunningAttached,
-        Terminating = Svc::ProcessState_Terminating,
-        Terminated = Svc::ProcessState_Terminated,
-        DebugBreak = Svc::ProcessState_DebugBreak,
-    };
-
     enum : u64 {
         /// Lowest allowed process ID for a kernel initial process.
         InitialKIPIDMin = 1,
@@ -107,12 +114,12 @@ public:

     /// Gets a reference to the process' page table.
     KPageTable& PageTable() {
-        return page_table;
+        return *page_table;
     }

     /// Gets a const reference to the process' page table.
     const KPageTable& PageTable() const {
-        return page_table;
+        return *page_table;
     }

     /// Gets a reference to the process' handle table.
@@ -138,25 +145,26 @@ public:
     }

     Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
-        R_RETURN(condition_var.Wait(address, cv_key, tag, ns));
+        return condition_var.Wait(address, cv_key, tag, ns);
     }

     Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
-        R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count));
+        return address_arbiter.SignalToAddress(address, signal_type, value, count);
     }

     Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
                               s64 timeout) {
-        R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout));
+        return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
     }

-    VAddr GetProcessLocalRegionAddress() const {
-        return plr_address;
+    /// Gets the address to the process' dedicated TLS region.
+    VAddr GetTLSRegionAddress() const {
+        return tls_region_address;
     }

     /// Gets the current status of the process
-    State GetState() const {
-        return state;
+    ProcessStatus GetStatus() const {
+        return status;
     }

     /// Gets the unique ID that identifies this particular process.
@@ -278,18 +286,18 @@ public:
     }

     /// Retrieves the total physical memory available to this process in bytes.
-    u64 GetTotalPhysicalMemoryAvailable();
+    u64 GetTotalPhysicalMemoryAvailable() const;

     /// Retrieves the total physical memory available to this process in bytes,
     /// without the size of the personal system resource heap added to it.
-    u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();
+    u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const;

     /// Retrieves the total physical memory used by this process in bytes.
-    u64 GetTotalPhysicalMemoryUsed();
+    u64 GetTotalPhysicalMemoryUsed() const;

     /// Retrieves the total physical memory used by this process in bytes,
     /// without the size of the personal system resource heap added to it.
-    u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();
+    u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;

     /// Gets the list of all threads created with this process as their owner.
     std::list<KThread*>& GetThreadList() {
@@ -407,24 +415,19 @@ private:
         pinned_threads[core_id] = nullptr;
     }

-    void FinalizeHandleTable() {
-        // Finalize the table.
-        handle_table.Finalize();
-
-        // Note that the table is finalized.
-        is_handle_table_initialized = false;
-    }
-
-    void ChangeState(State new_state);
+    /// Changes the process status. If the status is different
+    /// from the current process status, then this will trigger
+    /// a process signal.
+    void ChangeStatus(ProcessStatus new_status);

     /// Allocates the main thread stack for the process, given the stack size in bytes.
     Result AllocateMainThreadStack(std::size_t stack_size);

     /// Memory manager for this process
-    KPageTable page_table;
+    std::unique_ptr<KPageTable> page_table;

     /// Current status of the process
-    State state{};
+    ProcessStatus status{};

     /// The ID of this process
     u64 process_id = 0;
@@ -440,8 +443,6 @@ private:
     /// Resource limit descriptor for this process
     KResourceLimit* resource_limit{};

-    VAddr system_resource_address{};
-
     /// The ideal CPU core for this process, threads are scheduled on this core by default.
     u8 ideal_core = 0;

@@ -468,7 +469,7 @@ private:
     KConditionVariable condition_var;

     /// Address indicating the location of the process' dedicated TLS region.
-    VAddr plr_address = 0;
+    VAddr tls_region_address = 0;

     /// Random values for svcGetInfo RandomEntropy
     std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
@@ -494,12 +495,8 @@ private:
     /// Schedule count of this process
     s64 schedule_count{};

-    size_t memory_release_hint{};
-
     bool is_signaled{};
     bool is_suspended{};
-    bool is_immortal{};
-    bool is_handle_table_initialized{};
     bool is_initialized{};

     std::atomic<s32> num_running_threads{};
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index a039cc591..8ff1545b6 100755
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -50,7 +50,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     is_initialized = true;

     // Clear all pages in the memory.
-    std::memset(device_memory_.GetPointer<void>(physical_address_), 0, size_);
+    std::memset(device_memory_.GetPointer(physical_address_), 0, size_);

     return ResultSuccess;
 }
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index 5620c3660..34cb98456 100755
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -54,7 +54,7 @@ public:
      * @return A pointer to the shared memory block from the specified offset
      */
     u8* GetPointer(std::size_t offset = 0) {
-        return device_memory->GetPointer<u8>(physical_address + offset);
+        return device_memory->GetPointer(physical_address + offset);
     }

     /**
@@ -63,7 +63,7 @@ public:
      * @return A pointer to the shared memory block from the specified offset
      */
     const u8* GetPointer(std::size_t offset = 0) const {
-        return device_memory->GetPointer<u8>(physical_address + offset);
+        return device_memory->GetPointer(physical_address + offset);
     }

     void Finalize() override;
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index b7bfcdce3..174afc80d 100755
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -30,7 +30,6 @@
 #include "core/hle/kernel/k_worker_task_manager.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_results.h"
-#include "core/hle/kernel/svc_types.h"
 #include "core/hle/result.h"
 #include "core/memory.h"

@@ -39,9 +38,6 @@
 #endif

 namespace {
-
-constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1;
-
 static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
                                  u32 entry_point, u32 arg) {
     context = {};
@@ -245,7 +241,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
         }
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
@@ -258,7 +254,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
     thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
     thread->is_single_core = !Settings::values.use_multi_core.GetValue();

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KThread::InitializeDummyThread(KThread* thread) {
@@ -268,32 +264,31 @@ Result KThread::InitializeDummyThread(KThread* thread) {

     // Initialize emulation parameters.
     thread->stack_parameters.disable_count = 0;

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
-    R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
-                              ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc()));
+    return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
+                            system.GetCpuManager().GetGuestActivateFunc());
 }

 Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
-    R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
-                              ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc()));
+    return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
+                            system.GetCpuManager().GetIdleThreadStartFunc());
 }

 Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
                                              KThreadFunction func, uintptr_t arg, s32 virt_core) {
-    R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr,
-                              ThreadType::HighPriority,
-                              system.GetCpuManager().GetShutdownThreadStartFunc()));
+    return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
+                            system.GetCpuManager().GetShutdownThreadStartFunc());
 }

 Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
                                      uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
                                      KProcess* owner) {
     system.Kernel().GlobalSchedulerContext().AddThread(thread);
-    R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
-                              ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
+    return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
+                            ThreadType::User, system.GetCpuManager().GetGuestThreadFunc());
 }

 void KThread::PostDestroy(uintptr_t arg) {
@@ -543,7 +538,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
     *out_ideal_core = virtual_ideal_core_id;
     *out_affinity_mask = virtual_affinity_mask;

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
@@ -559,7 +554,7 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask)
         *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
@@ -671,7 +666,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
         } while (retry_update);
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 void KThread::SetBasePriority(s32 value) {
@@ -844,7 +839,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
         } while (thread_is_current);
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KThread::GetThreadContext3(std::vector<u8>& out) {
@@ -879,7 +874,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
         }
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 void KThread::AddWaiterImpl(KThread* thread) {
@@ -1043,7 +1038,7 @@ Result KThread::Run() {

         // Set our state and finish.
         SetState(ThreadState::Runnable);

-        R_SUCCEED();
+        return ResultSuccess;
     }
 }

@@ -1078,78 +1073,6 @@ void KThread::Exit() {
     UNREACHABLE_MSG("KThread::Exit() would return");
 }

-Result KThread::Terminate() {
-    ASSERT(this != GetCurrentThreadPointer(kernel));
-
-    // Request the thread terminate if it hasn't already.
-    if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
-        // If the thread isn't terminated, wait for it to terminate.
-        s32 index;
-        KSynchronizationObject* objects[] = {this};
-        R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1,
-                                           Svc::WaitInfinite));
-    }
-
-    R_SUCCEED();
-}
-
-ThreadState KThread::RequestTerminate() {
-    ASSERT(this != GetCurrentThreadPointer(kernel));
-
-    KScopedSchedulerLock sl{kernel};
-
-    // Determine if this is the first termination request.
-    const bool first_request = [&]() -> bool {
-        // Perform an atomic compare-and-swap from false to true.
-        bool expected = false;
-        return termination_requested.compare_exchange_strong(expected, true);
-    }();
-
-    // If this is the first request, start termination procedure.
-    if (first_request) {
-        // If the thread is in initialized state, just change state to terminated.
-        if (this->GetState() == ThreadState::Initialized) {
-            thread_state = ThreadState::Terminated;
-            return ThreadState::Terminated;
-        }
-
-        // Register the terminating dpc.
-        this->RegisterDpc(DpcFlag::Terminating);
-
-        // If the thread is pinned, unpin it.
-        if (this->GetStackParameters().is_pinned) {
-            this->GetOwnerProcess()->UnpinThread(this);
-        }
-
-        // If the thread is suspended, continue it.
-        if (this->IsSuspended()) {
-            suspend_allowed_flags = 0;
-            this->UpdateState();
-        }
-
-        // Change the thread's priority to be higher than any system thread's.
-        if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) {
-            this->SetBasePriority(TerminatingThreadPriority);
-        }
-
-        // If the thread is runnable, send a termination interrupt to other cores.
-        if (this->GetState() == ThreadState::Runnable) {
-            if (const u64 core_mask =
-                    physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel));
-                core_mask != 0) {
-                Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask);
-            }
-        }
-
-        // Wake up the thread.
-        if (this->GetState() == ThreadState::Waiting) {
-            wait_queue->CancelWait(this, ResultTerminationRequested, true);
-        }
-    }
-
-    return this->GetState();
-}
-
 Result KThread::Sleep(s64 timeout) {
     ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
     ASSERT(this == GetCurrentThreadPointer(kernel));
@@ -1163,7 +1086,7 @@ Result KThread::Sleep(s64 timeout) {
         // Check if the thread should terminate.
         if (this->IsTerminationRequested()) {
             slp.CancelSleep();
-            R_THROW(ResultTerminationRequested);
+            return ResultTerminationRequested;
         }

         // Wait for the sleep to end.
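The removed RequestTerminate above decides whether a caller is the first to ask for termination with an atomic compare-and-swap, so the termination procedure runs exactly once even when several threads race. The idiom in isolation (a sketch, not kernel code):

    #include <atomic>

    std::atomic<bool> termination_requested{false};

    // Returns true only for the first caller: the CAS atomically flips
    // false -> true and fails for everyone who arrives afterwards.
    bool MarkTerminationRequested() {
        bool expected = false;
        return termination_requested.compare_exchange_strong(expected, true);
    }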
@@ -1171,7 +1094,7 @@ Result KThread::Sleep(s64 timeout) {
         SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 void KThread::IfDummyThreadTryWait() {
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index e2a27d603..9ee20208e 100755
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -180,10 +180,6 @@ public:

     void Exit();

-    Result Terminate();
-
-    ThreadState RequestTerminate();
-
     [[nodiscard]] u32 GetSuspendFlags() const {
         return suspend_allowed_flags & suspend_request_flags;
     }
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index eed2dc9f3..9251f29ad 100755
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -24,7 +24,6 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/init/init_slab_setup.h"
 #include "core/hle/kernel/k_client_port.h"
-#include "core/hle/kernel/k_dynamic_resource_manager.h"
 #include "core/hle/kernel/k_handle_table.h"
 #include "core/hle/kernel/k_memory_layout.h"
 #include "core/hle/kernel/k_memory_manager.h"
@@ -74,16 +73,8 @@ struct KernelCore::Impl {
         InitializeMemoryLayout();
         Init::InitializeKPageBufferSlabHeap(system);
         InitializeShutdownThreads();
-        InitializePhysicalCores();
         InitializePreemption(kernel);
-
-        // Initialize the Dynamic Slab Heaps.
-        {
-            const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion();
-            ASSERT(pt_heap_region.GetEndAddress() != 0);
-
-            InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
-        }
+        InitializePhysicalCores();

         RegisterHostThread();
     }
@@ -95,15 +86,6 @@ struct KernelCore::Impl {
         }
     }

-    void CloseCurrentProcess() {
-        (*current_process).Finalize();
-        // current_process->Close();
-        // TODO: The current process should be destroyed based on accurate ref counting after
-        // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
-        (*current_process).Destroy();
-        current_process = nullptr;
-    }
-
     void Shutdown() {
         is_shutting_down.store(true, std::memory_order_relaxed);
         SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
@@ -117,6 +99,10 @@ struct KernelCore::Impl {
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;

+        for (auto& core : cores) {
+            core = nullptr;
+        }
+
         global_handle_table->Finalize();
         global_handle_table.reset();

@@ -166,7 +152,15 @@ struct KernelCore::Impl {
             }
         }

-        CloseCurrentProcess();
+        // Shutdown all processes.
+        if (current_process) {
+            (*current_process).Finalize();
+            // current_process->Close();
+            // TODO: The current process should be destroyed based on accurate ref counting after
+            // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
+            (*current_process).Destroy();
+            current_process = nullptr;
+        }

         // Track kernel objects that were not freed on shutdown
         {
@@ -263,18 +257,6 @@ struct KernelCore::Impl {
         system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
     }

-    void InitializeResourceManagers(VAddr address, size_t size) {
-        dynamic_page_manager = std::make_unique<KDynamicPageManager>();
-        memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
-        app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();
-
-        dynamic_page_manager->Initialize(address, size);
-        static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
-        memory_block_heap->Initialize(dynamic_page_manager.get(),
-                                      ApplicationMemoryBlockSlabHeapSize);
-        app_memory_block_manager->Initialize(nullptr, memory_block_heap.get());
-    }
-
     void InitializeShutdownThreads() {
         for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
             shutdown_threads[core_id] = KThread::Create(system.Kernel());
@@ -362,6 +344,11 @@ struct KernelCore::Impl {
     static inline thread_local KThread* current_thread{nullptr};

     KThread* GetCurrentEmuThread() {
+        // If we are shutting down the kernel, none of this is relevant anymore.
+        if (IsShuttingDown()) {
+            return {};
+        }
+
         const auto thread_id = GetCurrentHostThreadID();
         if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
             return GetHostDummyThread();
@@ -783,11 +770,6 @@ struct KernelCore::Impl {
     // Kernel memory management
     std::unique_ptr<KMemoryManager> memory_manager;

-    // Dynamic slab managers
-    std::unique_ptr<KDynamicPageManager> dynamic_page_manager;
-    std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap;
-    std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager;
-
     // Shared memory for services
     Kernel::KSharedMemory* hid_shared_mem{};
     Kernel::KSharedMemory* font_shared_mem{};
@@ -871,10 +853,6 @@ const KProcess* KernelCore::CurrentProcess() const {
     return impl->current_process;
 }

-void KernelCore::CloseCurrentProcess() {
-    impl->CloseCurrentProcess();
-}
-
 const std::vector<KProcess*>& KernelCore::GetProcessList() const {
     return impl->process_list;
 }
@@ -1063,14 +1041,6 @@ const KMemoryManager& KernelCore::MemoryManager() const {
     return *impl->memory_manager;
 }

-KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() {
-    return *impl->app_memory_block_manager;
-}
-
-const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const {
-    return *impl->app_memory_block_manager;
-}
-
 Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
     return *impl->hid_shared_mem;
 }
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 6eded9539..0847cbcbf 100755
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -37,7 +37,6 @@ class KClientSession;
 class KEvent;
 class KHandleTable;
 class KLinkedListNode;
-class KMemoryBlockSlabManager;
 class KMemoryLayout;
 class KMemoryManager;
 class KPageBuffer;
@@ -131,9 +130,6 @@ public:
     /// Retrieves a const pointer to the current process.
     const KProcess* CurrentProcess() const;

-    /// Closes the current process.
-    void CloseCurrentProcess();
-
     /// Retrieves the list of processes.
     const std::vector<KProcess*>& GetProcessList() const;

@@ -242,12 +238,6 @@ public:
     /// Gets the virtual memory manager for the kernel.
     const KMemoryManager& MemoryManager() const;

-    /// Gets the application memory block manager for the kernel.
-    KMemoryBlockSlabManager& GetApplicationMemoryBlockManager();
-
-    /// Gets the application memory block manager for the kernel.
-    const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const;
-
     /// Gets the shared memory object for HID services.
     Kernel::KSharedMemory& GetHidSharedMem();
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index b07ae3f02..1d145ea91 100755
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -933,7 +933,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han
         return ResultSuccess;

     case GetInfoType::UserExceptionContextAddr:
-        *result = process->GetProcessLocalRegionAddress();
+        *result = process->GetTLSRegionAddress();
         return ResultSuccess;

     case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
@@ -1888,7 +1888,7 @@ static void ExitProcess(Core::System& system) {
     auto* current_process = system.Kernel().CurrentProcess();

     LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
-    ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
+    ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
                "Process has already exited");

     system.Exit();
@@ -2557,7 +2557,7 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand
         return ResultInvalidEnumValue;
     }

-    *out = static_cast<u64>(process->GetState());
+    *out = static_cast<u64>(process->GetStatus());
     return ResultSuccess;
 }
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
index 1c821c87f..95750c3eb 100755
--- a/src/core/hle/kernel/svc_common.h
+++ b/src/core/hle/kernel/svc_common.h
@@ -14,13 +14,8 @@ namespace Kernel::Svc {

 using namespace Common::Literals;

-enum {
-    HandleWaitMask = (1u << 30),
-};
-
-constexpr inline s32 ArgumentHandleCountMax = 0x40;
-
-constexpr inline s64 WaitInfinite = -1;
+constexpr s32 ArgumentHandleCountMax = 0x40;
+constexpr u32 HandleWaitMask{1u << 30};

 constexpr inline std::size_t HeapSizeAlignment = 2_MiB;
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index bb4f7b004..79e15183a 100755
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -95,19 +95,6 @@ constexpr inline s32 IdealCoreNoUpdate = -3;
 constexpr inline s32 LowestThreadPriority = 63;
 constexpr inline s32 HighestThreadPriority = 0;

-constexpr inline s32 SystemThreadPriorityHighest = 16;
-
-enum ProcessState : u32 {
-    ProcessState_Created = 0,
-    ProcessState_CreatedAttached = 1,
-    ProcessState_Running = 2,
-    ProcessState_Crashed = 3,
-    ProcessState_RunningAttached = 4,
-    ProcessState_Terminating = 5,
-    ProcessState_Terminated = 6,
-    ProcessState_DebugBreak = 7,
-};
-
 constexpr inline size_t ThreadLocalRegionSize = 0x200;

 } // namespace Kernel::Svc
diff --git a/src/core/hle/result.h b/src/core/hle/result.h
index ef4b2d417..d67e68bae 100755
--- a/src/core/hle/result.h
+++ b/src/core/hle/result.h
@@ -135,14 +135,6 @@ union Result {
     [[nodiscard]] constexpr bool IsFailure() const {
         return !IsSuccess();
     }
-
-    [[nodiscard]] constexpr u32 GetInnerValue() const {
-        return static_cast<u32>(module.Value()) | (description << module.bits);
-    }
-
-    [[nodiscard]] constexpr bool Includes(Result result) const {
-        return GetInnerValue() == result.GetInnerValue();
-    }
 };
 static_assert(std::is_trivial_v<Result>);
@@ -470,6 +462,9 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess;
 #define R_UNLESS(expr, res)                                                                        \
     {                                                                                              \
         if (!(expr)) {                                                                             \
+            if (res.IsError()) {                                                                   \
+                LOG_ERROR(Kernel, "Failed with result: {}", res.raw);                              \
+            }                                                                                      \
             R_THROW(res);                                                                          \
         }                                                                                          \
     }
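For reference, a hedged usage sketch of the R_UNLESS macro shown above: when the guard condition is false, the macro now logs the raw error value and then throws (returns) the given Result from the enclosing function. The function name is illustrative, and ResultInvalidPriority stands in for whatever error code a real caller would return; the priority bounds are the svc_types constants shown above:

    // Illustrative only: validates a priority before applying it.
    Result SetThreadPriority(s32 priority) {
        R_UNLESS(HighestThreadPriority <= priority && priority <= LowestThreadPriority,
                 ResultInvalidPriority);
        // ... apply the new priority ...
        return ResultSuccess;
    }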
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index 652441bc2..becd6d1b9 100755
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -290,7 +290,7 @@ public:
         const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
         const auto start_info{page_table.QueryInfo(start - 1)};

-        if (start_info.GetState() != Kernel::KMemoryState::Free) {
+        if (start_info.state != Kernel::KMemoryState::Free) {
             return {};
         }

@@ -300,7 +300,7 @@ public:

         const auto end_info{page_table.QueryInfo(start + size)};

-        if (end_info.GetState() != Kernel::KMemoryState::Free) {
+        if (end_info.state != Kernel::KMemoryState::Free) {
             return {};
         }
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index b60679021..ddf273b5e 100755
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -128,8 +128,7 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
     }
     ASSERT(system.CurrentProcess()
                ->PageTable()
-               .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
-                                             Kernel::KMemoryPermission::None, true)
+               .LockForDeviceAddressSpace(handle_description->address, handle_description->size)
                .IsSuccess());
     std::memcpy(output.data(), &params, sizeof(params));
     return result;
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 9637cb5b1..2ac792566 100755
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -65,7 +65,7 @@ struct Memory::Impl {
             return {};
         }

-        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
+        return system.DeviceMemory().GetPointer(paddr) + vaddr;
     }

     [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
@@ -75,7 +75,7 @@ struct Memory::Impl {
             return {};
         }

-        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
+        return system.DeviceMemory().GetPointer(paddr) + vaddr;
     }

     u8 Read8(const VAddr addr) {
@@ -499,7 +499,7 @@ struct Memory::Impl {
         } else {
             while (base != end) {
                 page_table.pointers[base].Store(
-                    system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type);
+                    system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type);
                 page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);

                 ASSERT_MSG(page_table.pointers[base].Pointer(),
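The page-table update above stores a biased backing address, target - (base << YUZU_PAGEBITS), which is why the GetPointer helpers earlier in this hunk can translate a guest address with a single addition. A minimal sketch of the scheme (types and the page-bit constant are stand-ins, not yuzu's actual declarations):

    #include <cstdint>

    using VAddr = std::uint64_t;
    constexpr std::uint64_t PAGE_BITS = 12; // stand-in for YUZU_PAGEBITS

    // backing_addr[page] was stored as host_target - (page << PAGE_BITS), so
    // adding the full guest address yields the host address in one step,
    // with no per-access offset masking.
    std::uintptr_t TranslateToHost(const std::uintptr_t* backing_addr, VAddr vaddr) {
        return backing_addr[vaddr >> PAGE_BITS] + vaddr;
    }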
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp
index 284b2ae66..7c432a63c 100755
--- a/src/tests/core/core_timing.cpp
+++ b/src/tests/core/core_timing.cpp
@@ -40,6 +40,9 @@ struct ScopeInit final {
         core_timing.SetMulticore(true);
         core_timing.Initialize([]() {});
     }
+    ~ScopeInit() {
+        core_timing.Shutdown();
+    }

     Core::Timing::CoreTiming core_timing;
 };
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index ad8e0ec50..ebbebfa66 100755
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -483,9 +483,7 @@ void Maxwell3D::ProcessQueryGet() {

     switch (regs.report_semaphore.query.operation) {
     case Regs::ReportSemaphore::Operation::Release:
-        if (regs.report_semaphore.query.release ==
-                Regs::ReportSemaphore::Release::AfterAllPreceedingWrites ||
-            regs.report_semaphore.query.short_query != 0) {
+        if (regs.report_semaphore.query.short_query != 0) {
             const GPUVAddr sequence_address{regs.report_semaphore.Address()};
             const u32 payload = regs.report_semaphore.payload;
             std::function<void()> operation([this, sequence_address, payload] {
@@ -499,11 +497,10 @@ void Maxwell3D::ProcessQueryGet() {
             };
             const GPUVAddr sequence_address{regs.report_semaphore.Address()};
             const u32 payload = regs.report_semaphore.payload;
-            std::function<void()> operation([this, sequence_address, payload] {
+            [this, sequence_address, payload] {
                 memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
                 memory_manager.Write<u64>(sequence_address, payload);
-            });
-            rasterizer->SyncOperation(std::move(operation));
+            }();
         }
         break;
     case Regs::ReportSemaphore::Operation::Acquire:
@@ -579,11 +576,11 @@ void Maxwell3D::ProcessCounterReset() {

 void Maxwell3D::ProcessSyncPoint() {
     const u32 sync_point = regs.sync_info.sync_point.Value();
-    const auto condition = regs.sync_info.condition.Value();
-    [[maybe_unused]] const u32 cache_flush = regs.sync_info.clean_l2.Value();
-    if (condition == Regs::SyncInfo::Condition::RopWritesDone) {
-        rasterizer->SignalSyncPoint(sync_point);
+    const u32 cache_flush = regs.sync_info.clean_l2.Value();
+    if (cache_flush != 0) {
+        rasterizer->InvalidateGPUCache();
     }
+    rasterizer->SignalSyncPoint(sync_point);
 }

 std::optional<u64> Maxwell3D::GetQueryResult() {
diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp
index cca890792..3977bb0fb 100755
--- a/src/video_core/engines/puller.cpp
+++ b/src/video_core/engines/puller.cpp
@@ -75,11 +75,10 @@ void Puller::ProcessSemaphoreTriggerMethod() {
     if (op == GpuSemaphoreOperation::WriteLong) {
         const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
         const u32 payload = regs.semaphore_sequence;
-        std::function<void()> operation([this, sequence_address, payload] {
+        [this, sequence_address, payload] {
             memory_manager.Write<u64>(sequence_address + sizeof(u64), gpu.GetTicks());
             memory_manager.Write<u64>(sequence_address, payload);
-        });
-        rasterizer->SignalFence(std::move(operation));
+        }();
     } else {
         do {
             const u32 word{memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress())};
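Both GPU changes above replace a deferred std::function handed to the rasterizer with an immediately invoked lambda, so the semaphore write now happens synchronously at the call site instead of being queued. The pattern in isolation (names and the 16-byte semaphore layout here are illustrative):

    #include <cstdint>

    void WriteSemaphore(std::uint64_t* slot, std::uint64_t payload, std::uint64_t ticks) {
        [slot, payload, ticks] {
            slot[1] = ticks;   // timestamp in the second u64
            slot[0] = payload; // payload in the first u64
        }(); // trailing (): the lambda body runs right here, not later
    }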
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 4b15c0f85..7cb02631c 100755
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -59,11 +59,10 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
         std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) {
             return query_pool == *pool;
         });
+    ASSERT(it != std::end(pools));

-    if (it != std::end(pools)) {
-        const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
-        usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
-    }
+    const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
+    usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
 }

 QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp
index 6acfb7b06..24251247d 100755
--- a/src/yuzu/bootmanager.cpp
+++ b/src/yuzu/bootmanager.cpp
@@ -120,8 +120,8 @@ void EmuThread::run() {
         }
     }

-    // Shutdown the main emulated process
-    system.ShutdownMainProcess();
+    // Shutdown the core emulation
+    system.Shutdown();

 #if MICROPROFILE_ENABLED
     MicroProfileOnThreadExit();
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index a43a110ac..ea6225a13 100755
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -294,7 +294,6 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan
 #ifdef __linux__
     SetupSigInterrupts();
 #endif
-    system->Initialize();

     Common::Log::Initialize();
     LoadTranslation();
diff --git a/src/yuzu_cmd/yuzu.cpp b/src/yuzu_cmd/yuzu.cpp
index e16f79eb4..3a0f33cba 100755
--- a/src/yuzu_cmd/yuzu.cpp
+++ b/src/yuzu_cmd/yuzu.cpp
@@ -302,8 +302,6 @@ int main(int argc, char** argv) {
     }

     Core::System system{};
-    system.Initialize();
-
     InputCommon::InputSubsystem input_subsystem{};

     // Apply the command line arguments
@@ -394,7 +392,7 @@ int main(int argc, char** argv) {
     }
     system.DetachDebugger();
     void(system.Pause());
-    system.ShutdownMainProcess();
+    system.Shutdown();

     detached_tasks.WaitForAllTasks();
     return 0;