early-access version 3262
| @@ -1,7 +1,7 @@ | ||||
| yuzu emulator early access | ||||
| ============= | ||||
|  | ||||
| This is the source code for early-access 3260. | ||||
| This is the source code for early-access 3262. | ||||
|  | ||||
| ## Legal Notice | ||||
|  | ||||
|   | ||||
| @@ -226,7 +226,6 @@ add_library(core STATIC | ||||
|     hle/kernel/k_page_buffer.h | ||||
|     hle/kernel/k_page_heap.cpp | ||||
|     hle/kernel/k_page_heap.h | ||||
|     hle/kernel/k_page_group.cpp | ||||
|     hle/kernel/k_page_group.h | ||||
|     hle/kernel/k_page_table.cpp | ||||
|     hle/kernel/k_page_table.h | ||||
|   | ||||
| @@ -140,7 +140,9 @@ void EmulatedController::LoadDevices() { | ||||
|     battery_params[LeftIndex].Set("battery", true); | ||||
|     battery_params[RightIndex].Set("battery", true); | ||||
|  | ||||
|     camera_params = Common::ParamPackage{"engine:camera,camera:1"}; | ||||
|     camera_params[0] = right_joycon; | ||||
|     camera_params[0].Set("camera", true); | ||||
|     camera_params[1] = Common::ParamPackage{"engine:camera,camera:1"}; | ||||
|     ring_params[1] = Common::ParamPackage{"engine:joycon,axis_x:100,axis_y:101"}; | ||||
|     nfc_params[0] = Common::ParamPackage{"engine:virtual_amiibo,nfc:1"}; | ||||
|     nfc_params[1] = right_joycon; | ||||
| @@ -148,7 +150,7 @@ void EmulatedController::LoadDevices() { | ||||
|  | ||||
|     output_params[LeftIndex] = left_joycon; | ||||
|     output_params[RightIndex] = right_joycon; | ||||
|     output_params[2] = camera_params; | ||||
|     output_params[2] = camera_params[1]; | ||||
|     output_params[3] = nfc_params[0]; | ||||
|     output_params[LeftIndex].Set("output", true); | ||||
|     output_params[RightIndex].Set("output", true); | ||||
| @@ -166,7 +168,7 @@ void EmulatedController::LoadDevices() { | ||||
|     std::ranges::transform(battery_params, battery_devices.begin(), | ||||
|                            Common::Input::CreateInputDevice); | ||||
|     std::ranges::transform(color_params, color_devices.begin(), Common::Input::CreateInputDevice); | ||||
|     camera_devices = Common::Input::CreateInputDevice(camera_params); | ||||
|     std::ranges::transform(camera_params, camera_devices.begin(), Common::Input::CreateInputDevice); | ||||
|     std::ranges::transform(ring_params, ring_analog_devices.begin(), | ||||
|                            Common::Input::CreateInputDevice); | ||||
|     std::ranges::transform(nfc_params, nfc_devices.begin(), Common::Input::CreateInputDevice); | ||||
| @@ -357,12 +359,15 @@ void EmulatedController::ReloadInput() { | ||||
|         motion_devices[index]->ForceUpdate(); | ||||
|     } | ||||
|  | ||||
|     if (camera_devices) { | ||||
|         camera_devices->SetCallback({ | ||||
|     for (std::size_t index = 0; index < camera_devices.size(); ++index) { | ||||
|         if (!camera_devices[index]) { | ||||
|             continue; | ||||
|         } | ||||
|         camera_devices[index]->SetCallback({ | ||||
|             .on_change = | ||||
|                 [this](const Common::Input::CallbackStatus& callback) { SetCamera(callback); }, | ||||
|         }); | ||||
|         camera_devices->ForceUpdate(); | ||||
|         camera_devices[index]->ForceUpdate(); | ||||
|     } | ||||
|  | ||||
|     for (std::size_t index = 0; index < ring_analog_devices.size(); ++index) { | ||||
| @@ -480,7 +485,9 @@ void EmulatedController::UnloadInput() { | ||||
|     for (auto& stick : virtual_stick_devices) { | ||||
|         stick.reset(); | ||||
|     } | ||||
|     camera_devices.reset(); | ||||
|     for (auto& camera : camera_devices) { | ||||
|         camera.reset(); | ||||
|     } | ||||
|     for (auto& ring : ring_analog_devices) { | ||||
|         ring.reset(); | ||||
|     } | ||||
|   | ||||
| @@ -39,7 +39,8 @@ using ColorDevices = | ||||
|     std::array<std::unique_ptr<Common::Input::InputDevice>, max_emulated_controllers>; | ||||
| using BatteryDevices = | ||||
|     std::array<std::unique_ptr<Common::Input::InputDevice>, max_emulated_controllers>; | ||||
| using CameraDevices = std::unique_ptr<Common::Input::InputDevice>; | ||||
| using CameraDevices = | ||||
|     std::array<std::unique_ptr<Common::Input::InputDevice>, max_emulated_controllers>; | ||||
| using RingAnalogDevices = | ||||
|     std::array<std::unique_ptr<Common::Input::InputDevice>, max_emulated_controllers>; | ||||
| using NfcDevices = | ||||
| @@ -52,7 +53,7 @@ using ControllerMotionParams = std::array<Common::ParamPackage, Settings::Native | ||||
| using TriggerParams = std::array<Common::ParamPackage, Settings::NativeTrigger::NumTriggers>; | ||||
| using ColorParams = std::array<Common::ParamPackage, max_emulated_controllers>; | ||||
| using BatteryParams = std::array<Common::ParamPackage, max_emulated_controllers>; | ||||
| using CameraParams = Common::ParamPackage; | ||||
| using CameraParams = std::array<Common::ParamPackage, max_emulated_controllers>; | ||||
| using RingAnalogParams = std::array<Common::ParamPackage, max_emulated_controllers>; | ||||
| using NfcParams = std::array<Common::ParamPackage, max_emulated_controllers>; | ||||
| using OutputParams = std::array<Common::ParamPackage, output_devices_size>; | ||||
|   | ||||
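The two hunks above turn the camera input into a per-controller array: CameraParams and CameraDevices now mirror the other device categories, and LoadDevices fills them with the same bulk std::ranges::transform call. A minimal sketch of that parallel-array pattern, using simplified stand-ins rather than yuzu's real ParamPackage/InputDevice types (max_emulated_controllers is assumed to be 2 here purely for illustration):

    // Illustrative sketch only; these types stand in for Common::ParamPackage and
    // Common::Input::InputDevice.
    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <memory>

    struct ParamPackage {};
    struct InputDevice {};

    constexpr std::size_t max_emulated_controllers = 2; // assumed value for illustration

    using CameraParams = std::array<ParamPackage, max_emulated_controllers>;
    using CameraDevices = std::array<std::unique_ptr<InputDevice>, max_emulated_controllers>;

    std::unique_ptr<InputDevice> CreateInputDevice(const ParamPackage&) {
        return std::make_unique<InputDevice>();
    }

    void LoadCameraDevices(const CameraParams& camera_params, CameraDevices& camera_devices) {
        // With parallel arrays, cameras reuse the same one-line bulk creation that the
        // other per-controller device categories already use.
        std::ranges::transform(camera_params, camera_devices.begin(), CreateInputDevice);
    }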
| @@ -27,13 +27,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si | ||||
|     auto& page_table = m_owner->PageTable(); | ||||
|  | ||||
|     // Construct the page group. | ||||
|     m_page_group.emplace(kernel, page_table.GetBlockInfoManager()); | ||||
|     m_page_group = {}; | ||||
|  | ||||
|     // Lock the memory. | ||||
|     R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size)) | ||||
|     R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size)) | ||||
|  | ||||
|     // Clear the memory. | ||||
|     for (const auto& block : *m_page_group) { | ||||
|     for (const auto& block : m_page_group.Nodes()) { | ||||
|         std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize()); | ||||
|     } | ||||
|  | ||||
| @@ -51,13 +51,12 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si | ||||
| void KCodeMemory::Finalize() { | ||||
|     // Unlock. | ||||
|     if (!m_is_mapped && !m_is_owner_mapped) { | ||||
|         const size_t size = m_page_group->GetNumPages() * PageSize; | ||||
|         m_owner->PageTable().UnlockForCodeMemory(m_address, size, *m_page_group); | ||||
|         const size_t size = m_page_group.GetNumPages() * PageSize; | ||||
|         m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group); | ||||
|     } | ||||
|  | ||||
|     // Close the page group. | ||||
|     m_page_group->Close(); | ||||
|     m_page_group->Finalize(); | ||||
|     m_page_group = {}; | ||||
|  | ||||
|     // Close our reference to our owner. | ||||
|     m_owner->Close(); | ||||
| @@ -65,7 +64,7 @@ void KCodeMemory::Finalize() { | ||||
|  | ||||
| Result KCodeMemory::Map(VAddr address, size_t size) { | ||||
|     // Validate the size. | ||||
|     R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize); | ||||
|     R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize); | ||||
|  | ||||
|     // Lock ourselves. | ||||
|     KScopedLightLock lk(m_lock); | ||||
| @@ -74,8 +73,8 @@ Result KCodeMemory::Map(VAddr address, size_t size) { | ||||
|     R_UNLESS(!m_is_mapped, ResultInvalidState); | ||||
|  | ||||
|     // Map the memory. | ||||
|     R_TRY(kernel.CurrentProcess()->PageTable().MapPageGroup( | ||||
|         address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite)); | ||||
|     R_TRY(kernel.CurrentProcess()->PageTable().MapPages( | ||||
|         address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite)); | ||||
|  | ||||
|     // Mark ourselves as mapped. | ||||
|     m_is_mapped = true; | ||||
| @@ -85,14 +84,14 @@ Result KCodeMemory::Map(VAddr address, size_t size) { | ||||
|  | ||||
| Result KCodeMemory::Unmap(VAddr address, size_t size) { | ||||
|     // Validate the size. | ||||
|     R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize); | ||||
|     R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize); | ||||
|  | ||||
|     // Lock ourselves. | ||||
|     KScopedLightLock lk(m_lock); | ||||
|  | ||||
|     // Unmap the memory. | ||||
|     R_TRY(kernel.CurrentProcess()->PageTable().UnmapPageGroup(address, *m_page_group, | ||||
|                                                               KMemoryState::CodeOut)); | ||||
|     R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group, | ||||
|                                                           KMemoryState::CodeOut)); | ||||
|  | ||||
|     // Mark ourselves as unmapped. | ||||
|     m_is_mapped = false; | ||||
| @@ -102,7 +101,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) { | ||||
|  | ||||
| Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) { | ||||
|     // Validate the size. | ||||
|     R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize); | ||||
|     R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize); | ||||
|  | ||||
|     // Lock ourselves. | ||||
|     KScopedLightLock lk(m_lock); | ||||
| @@ -125,8 +124,8 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission | ||||
|     } | ||||
|  | ||||
|     // Map the memory. | ||||
|     R_TRY(m_owner->PageTable().MapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode, | ||||
|                                             k_perm)); | ||||
|     R_TRY( | ||||
|         m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm)); | ||||
|  | ||||
|     // Mark ourselves as mapped. | ||||
|     m_is_owner_mapped = true; | ||||
| @@ -136,13 +135,13 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission | ||||
|  | ||||
| Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) { | ||||
|     // Validate the size. | ||||
|     R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize); | ||||
|     R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize); | ||||
|  | ||||
|     // Lock ourselves. | ||||
|     KScopedLightLock lk(m_lock); | ||||
|  | ||||
|     // Unmap the memory. | ||||
|     R_TRY(m_owner->PageTable().UnmapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode)); | ||||
|     R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode)); | ||||
|  | ||||
|     // Mark ourselves as unmapped. | ||||
|     m_is_owner_mapped = false; | ||||
|   | ||||
| @@ -3,8 +3,6 @@ | ||||
|  | ||||
| #pragma once | ||||
|  | ||||
| #include <optional> | ||||
|  | ||||
| #include "common/common_types.h" | ||||
| #include "core/device_memory.h" | ||||
| #include "core/hle/kernel/k_auto_object.h" | ||||
| @@ -51,11 +49,11 @@ public: | ||||
|         return m_address; | ||||
|     } | ||||
|     size_t GetSize() const { | ||||
|         return m_is_initialized ? m_page_group->GetNumPages() * PageSize : 0; | ||||
|         return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0; | ||||
|     } | ||||
|  | ||||
| private: | ||||
|     std::optional<KPageGroup> m_page_group{}; | ||||
|     KPageGroup m_page_group{}; | ||||
|     KProcess* m_owner{}; | ||||
|     VAddr m_address{}; | ||||
|     KLightLock m_lock; | ||||
|   | ||||
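This file drops the std::optional wrapper around the page group: m_page_group becomes a plain value member that is reset by assignment (m_page_group = {}) and passed by address (&m_page_group), instead of being emplaced and dereferenced through the optional. A minimal sketch of the two member styles, with a simplified stand-in for KPageGroup:

    // Simplified stand-in; not the kernel's real KPageGroup.
    #include <memory>
    #include <optional>

    struct PageGroup {
        void Finalize() {} // would release whatever block list the group owns
    };

    // Style removed by this commit: lifetime managed through std::optional.
    struct OptionalMember {
        std::optional<PageGroup> m_page_group;
        void Initialize() { m_page_group.emplace(); }              // construct in place
        PageGroup* Get() { return std::addressof(*m_page_group); } // must dereference first
        void Finalize() { m_page_group->Finalize(); m_page_group.reset(); }
    };

    // Style used after this commit: a plain value member, reset by assignment.
    struct ValueMember {
        PageGroup m_page_group{};
        void Initialize() { m_page_group = {}; }                   // start from a fresh, empty group
        PageGroup* Get() { return &m_page_group; }                 // always valid
        void Finalize() { m_page_group.Finalize(); m_page_group = {}; }
    };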
| @@ -223,7 +223,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, | ||||
|  | ||||
|     // Ensure that we don't leave anything un-freed. | ||||
|     ON_RESULT_FAILURE { | ||||
|         for (const auto& it : *out) { | ||||
|         for (const auto& it : out->Nodes()) { | ||||
|             auto& manager = this->GetManager(it.GetAddress()); | ||||
|             const size_t node_num_pages = std::min<u64>( | ||||
|                 it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize); | ||||
| @@ -285,7 +285,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op | ||||
|                                       m_has_optimized_process[static_cast<size_t>(pool)], true)); | ||||
|  | ||||
|     // Open the first reference to the pages. | ||||
|     for (const auto& block : *out) { | ||||
|     for (const auto& block : out->Nodes()) { | ||||
|         PAddr cur_address = block.GetAddress(); | ||||
|         size_t remaining_pages = block.GetNumPages(); | ||||
|         while (remaining_pages > 0) { | ||||
| @@ -335,7 +335,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32 | ||||
|     // Perform optimized memory tracking, if we should. | ||||
|     if (optimized) { | ||||
|         // Iterate over the allocated blocks. | ||||
|         for (const auto& block : *out) { | ||||
|         for (const auto& block : out->Nodes()) { | ||||
|             // Get the block extents. | ||||
|             const PAddr block_address = block.GetAddress(); | ||||
|             const size_t block_pages = block.GetNumPages(); | ||||
| @@ -391,7 +391,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32 | ||||
|         } | ||||
|     } else { | ||||
|         // Set all the allocated memory. | ||||
|         for (const auto& block : *out) { | ||||
|         for (const auto& block : out->Nodes()) { | ||||
|             std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern, | ||||
|                         block.GetSize()); | ||||
|         } | ||||
|   | ||||
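The memory-manager loops above now walk out->Nodes() rather than the group itself; each node of the KPageGroup rewritten below exposes GetAddress(), GetNumPages(), and GetSize(), with GetSize() equal to GetNumPages() * PageSize. A small kernel-internal sketch of the same iteration, summing a group's total byte size:

    // Sketch of iterating a group's nodes, as the loops above do.
    std::size_t TotalBytes(const Kernel::KPageGroup& pg) {
        std::size_t bytes = 0;
        for (const auto& block : pg.Nodes()) {
            bytes += block.GetSize(); // GetSize() == GetNumPages() * PageSize
        }
        return bytes;
    }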
| @@ -1,4 +1,4 @@ | ||||
| // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||||
| // SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project | ||||
| // SPDX-License-Identifier: GPL-2.0-or-later | ||||
|  | ||||
| #pragma once | ||||
| @@ -13,23 +13,24 @@ | ||||
|  | ||||
| namespace Kernel { | ||||
|  | ||||
| class KBlockInfoManager; | ||||
| class KernelCore; | ||||
| class KPageGroup; | ||||
|  | ||||
| class KBlockInfo { | ||||
| public: | ||||
|     constexpr explicit KBlockInfo() : m_next(nullptr) {} | ||||
| private: | ||||
|     friend class KPageGroup; | ||||
|  | ||||
|     constexpr void Initialize(KPhysicalAddress addr, size_t np) { | ||||
| public: | ||||
|     constexpr KBlockInfo() = default; | ||||
|  | ||||
|     constexpr void Initialize(PAddr addr, size_t np) { | ||||
|         ASSERT(Common::IsAligned(addr, PageSize)); | ||||
|         ASSERT(static_cast<u32>(np) == np); | ||||
|  | ||||
|         m_page_index = static_cast<u32>(addr / PageSize); | ||||
|         m_page_index = static_cast<u32>(addr) / PageSize; | ||||
|         m_num_pages = static_cast<u32>(np); | ||||
|     } | ||||
|  | ||||
|     constexpr KPhysicalAddress GetAddress() const { | ||||
|     constexpr PAddr GetAddress() const { | ||||
|         return m_page_index * PageSize; | ||||
|     } | ||||
|     constexpr size_t GetNumPages() const { | ||||
| @@ -38,10 +39,10 @@ public: | ||||
|     constexpr size_t GetSize() const { | ||||
|         return this->GetNumPages() * PageSize; | ||||
|     } | ||||
|     constexpr KPhysicalAddress GetEndAddress() const { | ||||
|     constexpr PAddr GetEndAddress() const { | ||||
|         return (m_page_index + m_num_pages) * PageSize; | ||||
|     } | ||||
|     constexpr KPhysicalAddress GetLastAddress() const { | ||||
|     constexpr PAddr GetLastAddress() const { | ||||
|         return this->GetEndAddress() - 1; | ||||
|     } | ||||
|  | ||||
| @@ -61,8 +62,8 @@ public: | ||||
|         return !(*this == rhs); | ||||
|     } | ||||
|  | ||||
|     constexpr bool IsStrictlyBefore(KPhysicalAddress addr) const { | ||||
|         const KPhysicalAddress end = this->GetEndAddress(); | ||||
|     constexpr bool IsStrictlyBefore(PAddr addr) const { | ||||
|         const PAddr end = this->GetEndAddress(); | ||||
|  | ||||
|         if (m_page_index != 0 && end == 0) { | ||||
|             return false; | ||||
| @@ -71,11 +72,11 @@ public: | ||||
|         return end < addr; | ||||
|     } | ||||
|  | ||||
|     constexpr bool operator<(KPhysicalAddress addr) const { | ||||
|     constexpr bool operator<(PAddr addr) const { | ||||
|         return this->IsStrictlyBefore(addr); | ||||
|     } | ||||
|  | ||||
|     constexpr bool TryConcatenate(KPhysicalAddress addr, size_t np) { | ||||
|     constexpr bool TryConcatenate(PAddr addr, size_t np) { | ||||
|         if (addr != 0 && addr == this->GetEndAddress()) { | ||||
|             m_num_pages += static_cast<u32>(np); | ||||
|             return true; | ||||
| @@ -89,118 +90,96 @@ private: | ||||
|     } | ||||
|  | ||||
| private: | ||||
|     friend class KPageGroup; | ||||
|  | ||||
|     KBlockInfo* m_next{}; | ||||
|     u32 m_page_index{}; | ||||
|     u32 m_num_pages{}; | ||||
| }; | ||||
| static_assert(sizeof(KBlockInfo) <= 0x10); | ||||
|  | ||||
| class KPageGroup { | ||||
| class KPageGroup final { | ||||
| public: | ||||
|     class Iterator { | ||||
|     class Node final { | ||||
|     public: | ||||
|         using iterator_category = std::forward_iterator_tag; | ||||
|         using value_type = const KBlockInfo; | ||||
|         using difference_type = std::ptrdiff_t; | ||||
|         using pointer = value_type*; | ||||
|         using reference = value_type&; | ||||
|         constexpr Node(u64 addr_, std::size_t num_pages_) : addr{addr_}, num_pages{num_pages_} {} | ||||
|  | ||||
|         constexpr explicit Iterator(pointer n) : m_node(n) {} | ||||
|  | ||||
|         constexpr bool operator==(const Iterator& rhs) const { | ||||
|             return m_node == rhs.m_node; | ||||
|         } | ||||
|         constexpr bool operator!=(const Iterator& rhs) const { | ||||
|             return !(*this == rhs); | ||||
|         constexpr u64 GetAddress() const { | ||||
|             return addr; | ||||
|         } | ||||
|  | ||||
|         constexpr pointer operator->() const { | ||||
|             return m_node; | ||||
|         } | ||||
|         constexpr reference operator*() const { | ||||
|             return *m_node; | ||||
|         constexpr std::size_t GetNumPages() const { | ||||
|             return num_pages; | ||||
|         } | ||||
|  | ||||
|         constexpr Iterator& operator++() { | ||||
|             m_node = m_node->GetNext(); | ||||
|             return *this; | ||||
|         } | ||||
|  | ||||
|         constexpr Iterator operator++(int) { | ||||
|             const Iterator it{*this}; | ||||
|             ++(*this); | ||||
|             return it; | ||||
|         constexpr std::size_t GetSize() const { | ||||
|             return GetNumPages() * PageSize; | ||||
|         } | ||||
|  | ||||
|     private: | ||||
|         pointer m_node{}; | ||||
|         u64 addr{}; | ||||
|         std::size_t num_pages{}; | ||||
|     }; | ||||
|  | ||||
|     explicit KPageGroup(KernelCore& kernel, KBlockInfoManager* m) | ||||
|         : m_kernel{kernel}, m_manager{m} {} | ||||
|     ~KPageGroup() { | ||||
|         this->Finalize(); | ||||
|     } | ||||
|  | ||||
|     void CloseAndReset(); | ||||
|     void Finalize(); | ||||
|  | ||||
|     Iterator begin() const { | ||||
|         return Iterator{m_first_block}; | ||||
|     } | ||||
|     Iterator end() const { | ||||
|         return Iterator{nullptr}; | ||||
|     } | ||||
|     bool empty() const { | ||||
|         return m_first_block == nullptr; | ||||
|     } | ||||
|  | ||||
|     Result AddBlock(KPhysicalAddress addr, size_t num_pages); | ||||
|     void Open() const; | ||||
|     void OpenFirst() const; | ||||
|     void Close() const; | ||||
|  | ||||
|     size_t GetNumPages() const; | ||||
|  | ||||
|     bool IsEquivalentTo(const KPageGroup& rhs) const; | ||||
|  | ||||
|     bool operator==(const KPageGroup& rhs) const { | ||||
|         return this->IsEquivalentTo(rhs); | ||||
|     } | ||||
|  | ||||
|     bool operator!=(const KPageGroup& rhs) const { | ||||
|         return !(*this == rhs); | ||||
|     } | ||||
|  | ||||
| private: | ||||
|     KernelCore& m_kernel; | ||||
|     KBlockInfo* m_first_block{}; | ||||
|     KBlockInfo* m_last_block{}; | ||||
|     KBlockInfoManager* m_manager{}; | ||||
| }; | ||||
|  | ||||
| class KScopedPageGroup { | ||||
| public: | ||||
|     explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) { | ||||
|         if (m_pg) { | ||||
|             m_pg->Open(); | ||||
|         } | ||||
|     } | ||||
|     explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {} | ||||
|     ~KScopedPageGroup() { | ||||
|         if (m_pg) { | ||||
|             m_pg->Close(); | ||||
|         } | ||||
|     KPageGroup() = default; | ||||
|     KPageGroup(u64 address, u64 num_pages) { | ||||
|         ASSERT(AddBlock(address, num_pages).IsSuccess()); | ||||
|     } | ||||
|  | ||||
|     void CancelClose() { | ||||
|         m_pg = nullptr; | ||||
|     constexpr std::list<Node>& Nodes() { | ||||
|         return nodes; | ||||
|     } | ||||
|  | ||||
|     constexpr const std::list<Node>& Nodes() const { | ||||
|         return nodes; | ||||
|     } | ||||
|  | ||||
|     std::size_t GetNumPages() const { | ||||
|         std::size_t num_pages = 0; | ||||
|         for (const Node& node : nodes) { | ||||
|             num_pages += node.GetNumPages(); | ||||
|         } | ||||
|         return num_pages; | ||||
|     } | ||||
|  | ||||
|     bool IsEqual(KPageGroup& other) const { | ||||
|         auto this_node = nodes.begin(); | ||||
|         auto other_node = other.nodes.begin(); | ||||
|         while (this_node != nodes.end() && other_node != other.nodes.end()) { | ||||
|             if (this_node->GetAddress() != other_node->GetAddress() || | ||||
|                 this_node->GetNumPages() != other_node->GetNumPages()) { | ||||
|                 return false; | ||||
|             } | ||||
|             this_node = std::next(this_node); | ||||
|             other_node = std::next(other_node); | ||||
|         } | ||||
|  | ||||
|         return this_node == nodes.end() && other_node == other.nodes.end(); | ||||
|     } | ||||
|  | ||||
|     Result AddBlock(u64 address, u64 num_pages) { | ||||
|         if (!num_pages) { | ||||
|             return ResultSuccess; | ||||
|         } | ||||
|         if (!nodes.empty()) { | ||||
|             const auto node = nodes.back(); | ||||
|             if (node.GetAddress() + node.GetNumPages() * PageSize == address) { | ||||
|                 address = node.GetAddress(); | ||||
|                 num_pages += node.GetNumPages(); | ||||
|                 nodes.pop_back(); | ||||
|             } | ||||
|         } | ||||
|         nodes.push_back({address, num_pages}); | ||||
|         return ResultSuccess; | ||||
|     } | ||||
|  | ||||
|     bool Empty() const { | ||||
|         return nodes.empty(); | ||||
|     } | ||||
|  | ||||
|     void Finalize() {} | ||||
|  | ||||
| private: | ||||
|     const KPageGroup* m_pg{}; | ||||
|     std::list<Node> nodes; | ||||
| }; | ||||
|  | ||||
| } // namespace Kernel | ||||
|   | ||||
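The KPageGroup above is again a thin wrapper over std::list<Node>, and AddBlock coalesces a new range with the last node whenever the two are physically contiguous. A hedged usage sketch of that behaviour (kernel-internal code; PageSize is assumed to be 0x1000 here):

    // Usage sketch against the class above; the addresses assume PageSize == 0x1000.
    Kernel::KPageGroup pg;

    // Four pages at 0x10000, then two pages starting exactly where the first run ends.
    ASSERT(pg.AddBlock(0x10000, 4).IsSuccess());
    ASSERT(pg.AddBlock(0x14000, 2).IsSuccess()); // contiguous, so it merges into the previous node

    ASSERT(pg.Nodes().size() == 1); // one coalesced node...
    ASSERT(pg.GetNumPages() == 6);  // ...covering all six pages

    // A non-contiguous range starts a new node instead.
    ASSERT(pg.AddBlock(0x20000, 1).IsSuccess());
    ASSERT(pg.Nodes().size() == 2);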
| @@ -100,7 +100,7 @@ constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType a | ||||
|  | ||||
| KPageTable::KPageTable(Core::System& system_) | ||||
|     : m_general_lock{system_.Kernel()}, | ||||
|       m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {} | ||||
|       m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {} | ||||
|  | ||||
| KPageTable::~KPageTable() = default; | ||||
|  | ||||
| @@ -373,7 +373,7 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta | ||||
|                                                  m_memory_block_slab_manager); | ||||
|  | ||||
|     // Allocate and open. | ||||
|     KPageGroup pg{m_kernel, m_block_info_manager}; | ||||
|     KPageGroup pg; | ||||
|     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( | ||||
|         &pg, num_pages, | ||||
|         KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option))); | ||||
| @@ -432,12 +432,9 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si | ||||
|         const size_t num_pages = size / PageSize; | ||||
|  | ||||
|         // Create page groups for the memory being mapped. | ||||
|         KPageGroup pg{m_kernel, m_block_info_manager}; | ||||
|         KPageGroup pg; | ||||
|         AddRegionToPages(src_address, num_pages, pg); | ||||
|  | ||||
|         // We're going to perform an update, so create a helper. | ||||
|         KScopedPageTableUpdater updater(this); | ||||
|  | ||||
|         // Reprotect the source as kernel-read/not mapped. | ||||
|         const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead | | ||||
|                                                              KMemoryPermission::NotMapped); | ||||
| @@ -450,10 +447,7 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si | ||||
|         }); | ||||
|  | ||||
|         // Map the alias pages. | ||||
|         const KPageProperties dst_properties = {new_perm, false, false, | ||||
|                                                 DisableMergeAttribute::DisableHead}; | ||||
|         R_TRY( | ||||
|             this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false)); | ||||
|         R_TRY(MapPages(dst_address, pg, new_perm)); | ||||
|  | ||||
|         // We successfully mapped the alias pages, so we don't need to unprotect the src pages on | ||||
|         // failure. | ||||
| @@ -599,7 +593,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { | ||||
|     const size_t size = num_pages * PageSize; | ||||
|  | ||||
|     // We're making a new group, not adding to an existing one. | ||||
|     R_UNLESS(pg.empty(), ResultInvalidCurrentMemory); | ||||
|     R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory); | ||||
|  | ||||
|     // Begin traversal. | ||||
|     Common::PageTable::TraversalContext context; | ||||
| @@ -646,10 +640,11 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { | ||||
|     R_SUCCEED(); | ||||
| } | ||||
|  | ||||
| bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) { | ||||
| bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) { | ||||
|     ASSERT(this->IsLockedByCurrentThread()); | ||||
|  | ||||
|     const size_t size = num_pages * PageSize; | ||||
|     const auto& pg = pg_ll.Nodes(); | ||||
|     const auto& memory_layout = m_system.Kernel().MemoryLayout(); | ||||
|  | ||||
|     // Empty groups are necessarily invalid. | ||||
| @@ -947,6 +942,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add | ||||
|  | ||||
|     ON_RESULT_FAILURE { | ||||
|         if (cur_mapped_addr != dst_addr) { | ||||
|             // HACK: Manually close the pages. | ||||
|             HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize); | ||||
|  | ||||
|             ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize, | ||||
|                            KMemoryPermission::None, OperationType::Unmap) | ||||
|                        .IsSuccess()); | ||||
| @@ -1022,6 +1020,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add | ||||
|         // Map the page. | ||||
|         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page)); | ||||
|  | ||||
|         // HACK: Manually open the pages. | ||||
|         HACK_OpenPages(start_partial_page, 1); | ||||
|  | ||||
|         // Update tracking extents. | ||||
|         cur_mapped_addr += PageSize; | ||||
|         cur_block_addr += PageSize; | ||||
| @@ -1050,6 +1051,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add | ||||
|             R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map, | ||||
|                           cur_block_addr)); | ||||
|  | ||||
|             // HACK: Manually open the pages. | ||||
|             HACK_OpenPages(cur_block_addr, cur_block_size / PageSize); | ||||
|  | ||||
|             // Update tracking extents. | ||||
|             cur_mapped_addr += cur_block_size; | ||||
|             cur_block_addr = next_entry.phys_addr; | ||||
| @@ -1069,6 +1073,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add | ||||
|         R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map, | ||||
|                       cur_block_addr)); | ||||
|  | ||||
|         // HACK: Manually open the pages. | ||||
|         HACK_OpenPages(cur_block_addr, last_block_size / PageSize); | ||||
|  | ||||
|         // Update tracking extents. | ||||
|         cur_mapped_addr += last_block_size; | ||||
|         cur_block_addr += last_block_size; | ||||
| @@ -1100,6 +1107,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add | ||||
|  | ||||
|         // Map the page. | ||||
|         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page)); | ||||
|  | ||||
|         // HACK: Manually open the pages. | ||||
|         HACK_OpenPages(end_partial_page, 1); | ||||
|     } | ||||
|  | ||||
|     // Update memory blocks to reflect our changes | ||||
| @@ -1201,6 +1211,9 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState | ||||
|     const size_t aligned_size = aligned_end - aligned_start; | ||||
|     const size_t aligned_num_pages = aligned_size / PageSize; | ||||
|  | ||||
|     // HACK: Manually close the pages. | ||||
|     HACK_ClosePages(aligned_start, aligned_num_pages); | ||||
|  | ||||
|     // Unmap the pages. | ||||
|     R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap)); | ||||
|  | ||||
| @@ -1488,6 +1501,17 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi | ||||
|     } | ||||
| } | ||||
|  | ||||
| void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) { | ||||
|     m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages); | ||||
| } | ||||
|  | ||||
| void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) { | ||||
|     for (size_t index = 0; index < num_pages; ++index) { | ||||
|         const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize)); | ||||
|         m_system.Kernel().MemoryManager().Close(paddr, 1); | ||||
|     } | ||||
| } | ||||
|  | ||||
| Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | ||||
|     // Lock the physical memory lock. | ||||
|     KScopedLightLock phys_lk(m_map_physical_memory_lock); | ||||
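With page groups no longer carrying reference counts through the mapping path, this diff balances the memory manager's physical reference counts by hand: map operations are followed by HACK_OpenPages and unmaps are preceded by HACK_ClosePages. The close path resolves and releases one page at a time because a contiguous virtual range may be backed by discontiguous physical pages. A hedged, self-contained model of that per-page close loop (the type aliases, the callbacks, and ClosePagesIndividually itself are illustrative, not yuzu's API):

    #include <cstddef>
    #include <cstdint>
    #include <functional>

    using VAddr = std::uint64_t;
    using PAddr = std::uint64_t;
    constexpr std::size_t PageSize = 0x1000; // assumed 4 KiB pages

    // Mirrors the shape of HACK_ClosePages: translate and release each page individually,
    // since the backing physical pages need not be contiguous.
    void ClosePagesIndividually(const std::function<PAddr(VAddr)>& get_physical_addr,
                                const std::function<void(PAddr, std::size_t)>& close_ref,
                                VAddr virt_addr, std::size_t num_pages) {
        for (std::size_t index = 0; index < num_pages; ++index) {
            const PAddr paddr = get_physical_addr(virt_addr + index * PageSize);
            close_ref(paddr, 1);
        }
    }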
| @@ -1548,7 +1572,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | ||||
|             R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||||
|  | ||||
|             // Allocate pages for the new memory. | ||||
|             KPageGroup pg{m_kernel, m_block_info_manager}; | ||||
|             KPageGroup pg; | ||||
|             R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess( | ||||
|                 &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0)); | ||||
|  | ||||
| @@ -1626,7 +1650,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | ||||
|                 KScopedPageTableUpdater updater(this); | ||||
|  | ||||
|                 // Prepare to iterate over the memory. | ||||
|                 auto pg_it = pg.begin(); | ||||
|                 auto pg_it = pg.Nodes().begin(); | ||||
|                 PAddr pg_phys_addr = pg_it->GetAddress(); | ||||
|                 size_t pg_pages = pg_it->GetNumPages(); | ||||
|  | ||||
| @@ -1656,6 +1680,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | ||||
|                                              last_unmap_address + 1 - cur_address) / | ||||
|                                     PageSize; | ||||
|  | ||||
|                                 // HACK: Manually close the pages. | ||||
|                                 HACK_ClosePages(cur_address, cur_pages); | ||||
|  | ||||
|                                 // Unmap. | ||||
|                                 ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, | ||||
|                                                OperationType::Unmap) | ||||
| @@ -1676,7 +1703,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | ||||
|                     // Release any remaining unmapped memory. | ||||
|                     m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages); | ||||
|                     m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages); | ||||
|                     for (++pg_it; pg_it != pg.end(); ++pg_it) { | ||||
|                     for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) { | ||||
|                         m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(), | ||||
|                                                                     pg_it->GetNumPages()); | ||||
|                         m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(), | ||||
| @@ -1704,7 +1731,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | ||||
|                             // Check if we're at the end of the physical block. | ||||
|                             if (pg_pages == 0) { | ||||
|                                 // Ensure there are more pages to map. | ||||
|                                 ASSERT(pg_it != pg.end()); | ||||
|                                 ASSERT(pg_it != pg.Nodes().end()); | ||||
|  | ||||
|                                 // Advance our physical block. | ||||
|                                 ++pg_it; | ||||
| @@ -1715,7 +1742,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | ||||
|                             // Map whatever we can. | ||||
|                             const size_t cur_pages = std::min(pg_pages, map_pages); | ||||
|                             R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite, | ||||
|                                           OperationType::MapFirst, pg_phys_addr)); | ||||
|                                           OperationType::Map, pg_phys_addr)); | ||||
|  | ||||
|                             // HACK: Manually open the pages. | ||||
|                             HACK_OpenPages(pg_phys_addr, cur_pages); | ||||
|  | ||||
|                             // Advance. | ||||
|                             cur_address += cur_pages * PageSize; | ||||
| @@ -1858,6 +1888,9 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | ||||
|                                               last_address + 1 - cur_address) / | ||||
|                                      PageSize; | ||||
|  | ||||
|             // HACK: Manually close the pages. | ||||
|             HACK_ClosePages(cur_address, cur_pages); | ||||
|  | ||||
|             // Unmap. | ||||
|             ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap) | ||||
|                        .IsSuccess()); | ||||
| @@ -1887,8 +1920,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | ||||
|     R_SUCCEED(); | ||||
| } | ||||
|  | ||||
| Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||||
|                              size_t size) { | ||||
| Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) { | ||||
|     // Lock the table. | ||||
|     KScopedLightLock lk(m_general_lock); | ||||
|  | ||||
| @@ -1909,73 +1941,53 @@ Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_ad | ||||
|                                  KMemoryAttribute::None)); | ||||
|  | ||||
|     // Create an update allocator for the source. | ||||
|     Result src_allocator_result; | ||||
|     Result src_allocator_result{ResultSuccess}; | ||||
|     KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||||
|                                                      m_memory_block_slab_manager, | ||||
|                                                      num_src_allocator_blocks); | ||||
|     R_TRY(src_allocator_result); | ||||
|  | ||||
|     // Create an update allocator for the destination. | ||||
|     Result dst_allocator_result; | ||||
|     Result dst_allocator_result{ResultSuccess}; | ||||
|     KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||||
|                                                      m_memory_block_slab_manager, | ||||
|                                                      num_dst_allocator_blocks); | ||||
|     R_TRY(dst_allocator_result); | ||||
|  | ||||
|     // Map the memory. | ||||
|     KPageGroup page_linked_list; | ||||
|     const size_t num_pages{size / PageSize}; | ||||
|     const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>( | ||||
|         KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); | ||||
|     const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked; | ||||
|  | ||||
|     AddRegionToPages(src_address, num_pages, page_linked_list); | ||||
|     { | ||||
|         // Determine the number of pages being operated on. | ||||
|         const size_t num_pages = size / PageSize; | ||||
|  | ||||
|         // Create page groups for the memory being unmapped. | ||||
|         KPageGroup pg{m_kernel, m_block_info_manager}; | ||||
|  | ||||
|         // Create the page group representing the source. | ||||
|         R_TRY(this->MakePageGroup(pg, src_address, num_pages)); | ||||
|  | ||||
|         // We're going to perform an update, so create a helper. | ||||
|         KScopedPageTableUpdater updater(this); | ||||
|  | ||||
|         // Reprotect the source as kernel-read/not mapped. | ||||
|         const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>( | ||||
|             KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); | ||||
|         const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked; | ||||
|         const KPageProperties src_properties = {new_src_perm, false, false, | ||||
|                                                 DisableMergeAttribute::DisableHeadBodyTail}; | ||||
|         R_TRY(this->Operate(src_address, num_pages, src_properties.perm, | ||||
|                             OperationType::ChangePermissions)); | ||||
|         auto block_guard = detail::ScopeExit([&] { | ||||
|             Operate(src_address, num_pages, KMemoryPermission::UserReadWrite, | ||||
|                     OperationType::ChangePermissions); | ||||
|         }); | ||||
|         R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions)); | ||||
|         R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite)); | ||||
|  | ||||
|         // Ensure that we unprotect the source pages on failure. | ||||
|         ON_RESULT_FAILURE { | ||||
|             const KPageProperties unprotect_properties = { | ||||
|                 KMemoryPermission::UserReadWrite, false, false, | ||||
|                 DisableMergeAttribute::EnableHeadBodyTail}; | ||||
|             ASSERT(this->Operate(src_address, num_pages, unprotect_properties.perm, | ||||
|                                  OperationType::ChangePermissions) == ResultSuccess); | ||||
|         }; | ||||
|  | ||||
|         // Map the alias pages. | ||||
|         const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false, | ||||
|                                                     DisableMergeAttribute::DisableHead}; | ||||
|         R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties, | ||||
|                                      false)); | ||||
|  | ||||
|         // Apply the memory block updates. | ||||
|         m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, | ||||
|                                       src_state, new_src_perm, new_src_attr, | ||||
|                                       KMemoryBlockDisableMergeAttribute::Locked, | ||||
|                                       KMemoryBlockDisableMergeAttribute::None); | ||||
|         m_memory_block_manager.Update( | ||||
|             std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack, | ||||
|             KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||||
|             KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None); | ||||
|         block_guard.Cancel(); | ||||
|     } | ||||
|  | ||||
|     // Apply the memory block updates. | ||||
|     m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, | ||||
|                                   new_src_perm, new_src_attr, | ||||
|                                   KMemoryBlockDisableMergeAttribute::Locked, | ||||
|                                   KMemoryBlockDisableMergeAttribute::None); | ||||
|     m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, | ||||
|                                   KMemoryState::Stack, KMemoryPermission::UserReadWrite, | ||||
|                                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||||
|                                   KMemoryBlockDisableMergeAttribute::None); | ||||
|  | ||||
|     R_SUCCEED(); | ||||
| } | ||||
|  | ||||
| Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||||
|                                size_t size) { | ||||
| Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) { | ||||
|     // Lock the table. | ||||
|     KScopedLightLock lk(m_general_lock); | ||||
|  | ||||
| @@ -1997,208 +2009,108 @@ Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_ | ||||
|         KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); | ||||
|  | ||||
|     // Create an update allocator for the source. | ||||
|     Result src_allocator_result; | ||||
|     Result src_allocator_result{ResultSuccess}; | ||||
|     KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||||
|                                                      m_memory_block_slab_manager, | ||||
|                                                      num_src_allocator_blocks); | ||||
|     R_TRY(src_allocator_result); | ||||
|  | ||||
|     // Create an update allocator for the destination. | ||||
|     Result dst_allocator_result; | ||||
|     Result dst_allocator_result{ResultSuccess}; | ||||
|     KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||||
|                                                      m_memory_block_slab_manager, | ||||
|                                                      num_dst_allocator_blocks); | ||||
|     R_TRY(dst_allocator_result); | ||||
|  | ||||
|     // Unmap the memory. | ||||
|     KPageGroup src_pages; | ||||
|     KPageGroup dst_pages; | ||||
|     const size_t num_pages{size / PageSize}; | ||||
|  | ||||
|     AddRegionToPages(src_address, num_pages, src_pages); | ||||
|     AddRegionToPages(dst_address, num_pages, dst_pages); | ||||
|  | ||||
|     R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion); | ||||
|  | ||||
|     { | ||||
|         // Determine the number of pages being operated on. | ||||
|         const size_t num_pages = size / PageSize; | ||||
|         auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); }); | ||||
|  | ||||
|         // Create page groups for the memory being unmapped. | ||||
|         KPageGroup pg{m_kernel, m_block_info_manager}; | ||||
|         R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap)); | ||||
|         R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite, | ||||
|                       OperationType::ChangePermissions)); | ||||
|  | ||||
|         // Create the page group representing the destination. | ||||
|         R_TRY(this->MakePageGroup(pg, dst_address, num_pages)); | ||||
|         block_guard.Cancel(); | ||||
|     } | ||||
|  | ||||
|         // Ensure the page group is the valid for the source. | ||||
|         R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion); | ||||
|     // Apply the memory block updates. | ||||
|     m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, | ||||
|                                   KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||||
|                                   KMemoryBlockDisableMergeAttribute::None, | ||||
|                                   KMemoryBlockDisableMergeAttribute::Locked); | ||||
|     m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, | ||||
|                                   KMemoryState::None, KMemoryPermission::None, | ||||
|                                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||||
|                                   KMemoryBlockDisableMergeAttribute::Normal); | ||||
|  | ||||
|         // We're going to perform an update, so create a helper. | ||||
|         KScopedPageTableUpdater updater(this); | ||||
|     R_SUCCEED(); | ||||
| } | ||||
|  | ||||
|         // Unmap the aliased copy of the pages. | ||||
|         const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false, | ||||
|                                                       DisableMergeAttribute::None}; | ||||
|         R_TRY( | ||||
|             this->Operate(dst_address, num_pages, dst_unmap_properties.perm, OperationType::Unmap)); | ||||
| Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, | ||||
|                             KMemoryPermission perm) { | ||||
|     ASSERT(this->IsLockedByCurrentThread()); | ||||
|  | ||||
|         // Ensure that we re-map the aliased pages on failure. | ||||
|         ON_RESULT_FAILURE { | ||||
|             this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg); | ||||
|         }; | ||||
|     VAddr cur_addr{addr}; | ||||
|  | ||||
|         // Try to set the permissions for the source pages back to what they should be. | ||||
|         const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false, | ||||
|                                                 DisableMergeAttribute::EnableAndMergeHeadBodyTail}; | ||||
|         R_TRY(this->Operate(src_address, num_pages, src_properties.perm, | ||||
|                             OperationType::ChangePermissions)); | ||||
|     for (const auto& node : page_linked_list.Nodes()) { | ||||
|         if (const auto result{ | ||||
|                 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; | ||||
|             result.IsError()) { | ||||
|             const size_t num_pages{(addr - cur_addr) / PageSize}; | ||||
|  | ||||
|         // Apply the memory block updates. | ||||
|         m_memory_block_manager.Update( | ||||
|             std::addressof(src_allocator), src_address, num_pages, src_state, | ||||
|             KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||||
|             KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); | ||||
|         m_memory_block_manager.Update( | ||||
|             std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, | ||||
|             KMemoryPermission::None, KMemoryAttribute::None, | ||||
|             KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); | ||||
|             ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap) | ||||
|                        .IsSuccess()); | ||||
|  | ||||
|             R_RETURN(result); | ||||
|         } | ||||
|  | ||||
|         cur_addr += node.GetNumPages() * PageSize; | ||||
|     } | ||||
|  | ||||
|     R_SUCCEED(); | ||||
| } | ||||
|  | ||||
| Result KPageTable::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address, | ||||
|                                            size_t num_pages, KMemoryPermission perm) { | ||||
|     ASSERT(this->IsLockedByCurrentThread()); | ||||
| Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state, | ||||
|                             KMemoryPermission perm) { | ||||
|     // Check that the map is in range. | ||||
|     const size_t num_pages{page_linked_list.GetNumPages()}; | ||||
|     const size_t size{num_pages * PageSize}; | ||||
|     R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); | ||||
|  | ||||
|     // Create a page group to hold the pages we allocate. | ||||
|     KPageGroup pg{m_kernel, m_block_info_manager}; | ||||
|     // Lock the table. | ||||
|     KScopedLightLock lk(m_general_lock); | ||||
|  | ||||
|     // Allocate the pages. | ||||
|     R_TRY( | ||||
|         m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option)); | ||||
|     // Check the memory state. | ||||
|     R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, | ||||
|                                  KMemoryPermission::None, KMemoryPermission::None, | ||||
|                                  KMemoryAttribute::None, KMemoryAttribute::None)); | ||||
|  | ||||
|     // Ensure that the page group is closed when we're done working with it. | ||||
|     SCOPE_EXIT({ pg.Close(); }); | ||||
|  | ||||
|     // Clear all pages. | ||||
|     for (const auto& it : pg) { | ||||
|         std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value, | ||||
|                     it.GetSize()); | ||||
|     } | ||||
|     // Create an update allocator. | ||||
|     Result allocator_result{ResultSuccess}; | ||||
|     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||||
|                                                  m_memory_block_slab_manager); | ||||
|  | ||||
|     // Map the pages. | ||||
|     R_RETURN(this->Operate(address, num_pages, pg, OperationType::MapGroup)); | ||||
| } | ||||
|     R_TRY(MapPages(address, page_linked_list, perm)); | ||||
|  | ||||
| Result KPageTable::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address, | ||||
|                                     const KPageGroup& pg, const KPageProperties properties, | ||||
|                                     bool reuse_ll) { | ||||
|     ASSERT(this->IsLockedByCurrentThread()); | ||||
|     // Update the blocks. | ||||
|     m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, | ||||
|                                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||||
|                                   KMemoryBlockDisableMergeAttribute::None); | ||||
|  | ||||
|     // Note the current address, so that we can iterate. | ||||
|     const KProcessAddress start_address = address; | ||||
|     KProcessAddress cur_address = address; | ||||
|  | ||||
|     // Ensure that we clean up on failure. | ||||
|     ON_RESULT_FAILURE { | ||||
|         ASSERT(!reuse_ll); | ||||
|         if (cur_address != start_address) { | ||||
|             const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||||
|                                                       DisableMergeAttribute::None}; | ||||
|             ASSERT(this->Operate(start_address, (cur_address - start_address) / PageSize, | ||||
|                                  unmap_properties.perm, OperationType::Unmap) == ResultSuccess); | ||||
|         } | ||||
|     }; | ||||
|  | ||||
|     // Iterate, mapping all pages in the group. | ||||
|     for (const auto& block : pg) { | ||||
|         // Map and advance. | ||||
|         const KPageProperties cur_properties = | ||||
|             (cur_address == start_address) | ||||
|                 ? properties | ||||
|                 : KPageProperties{properties.perm, properties.io, properties.uncached, | ||||
|                                   DisableMergeAttribute::None}; | ||||
|         this->Operate(cur_address, block.GetNumPages(), cur_properties.perm, OperationType::Map, | ||||
|                       block.GetAddress()); | ||||
|         cur_address += block.GetSize(); | ||||
|     } | ||||
|  | ||||
|     // We succeeded! | ||||
|     R_SUCCEED(); | ||||
| } | ||||
|  | ||||
| void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, | ||||
|                                 const KPageGroup& pg) { | ||||
|     ASSERT(this->IsLockedByCurrentThread()); | ||||
|  | ||||
|     // Note the current address, so that we can iterate. | ||||
|     const KProcessAddress start_address = address; | ||||
|     const KProcessAddress last_address = start_address + size - 1; | ||||
|     const KProcessAddress end_address = last_address + 1; | ||||
|  | ||||
|     // Iterate over the memory. | ||||
|     auto pg_it = pg.begin(); | ||||
|     ASSERT(pg_it != pg.end()); | ||||
|  | ||||
|     KPhysicalAddress pg_phys_addr = pg_it->GetAddress(); | ||||
|     size_t pg_pages = pg_it->GetNumPages(); | ||||
|  | ||||
|     auto it = m_memory_block_manager.FindIterator(start_address); | ||||
|     while (true) { | ||||
|         // Check that the iterator is valid. | ||||
|         ASSERT(it != m_memory_block_manager.end()); | ||||
|  | ||||
|         // Get the memory info. | ||||
|         const KMemoryInfo info = it->GetMemoryInfo(); | ||||
|  | ||||
|         // Determine the range to map. | ||||
|         KProcessAddress map_address = std::max(info.GetAddress(), start_address); | ||||
|         const KProcessAddress map_end_address = std::min(info.GetEndAddress(), end_address); | ||||
|         ASSERT(map_end_address != map_address); | ||||
|  | ||||
|         // Determine if we should disable head merge. | ||||
|         const bool disable_head_merge = | ||||
|             info.GetAddress() >= start_address && | ||||
|             True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal); | ||||
|         const KPageProperties map_properties = { | ||||
|             info.GetPermission(), false, false, | ||||
|             disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None}; | ||||
|  | ||||
|         // While we have pages to map, map them. | ||||
|         size_t map_pages = (map_end_address - map_address) / PageSize; | ||||
|         while (map_pages > 0) { | ||||
|             // Check if we're at the end of the physical block. | ||||
|             if (pg_pages == 0) { | ||||
|                 // Ensure there are more pages to map. | ||||
|                 ASSERT(pg_it != pg.end()); | ||||
|  | ||||
|                 // Advance our physical block. | ||||
|                 ++pg_it; | ||||
|                 pg_phys_addr = pg_it->GetAddress(); | ||||
|                 pg_pages = pg_it->GetNumPages(); | ||||
|             } | ||||
|  | ||||
|             // Map whatever we can. | ||||
|             const size_t cur_pages = std::min(pg_pages, map_pages); | ||||
|             ASSERT(this->Operate(map_address, map_pages, map_properties.perm, OperationType::Map, | ||||
|                                  pg_phys_addr) == ResultSuccess); | ||||
|  | ||||
|             // Advance. | ||||
|             map_address += cur_pages * PageSize; | ||||
|             map_pages -= cur_pages; | ||||
|  | ||||
|             pg_phys_addr += cur_pages * PageSize; | ||||
|             pg_pages -= cur_pages; | ||||
|         } | ||||
|  | ||||
|         // Check if we're done. | ||||
|         if (last_address <= info.GetLastAddress()) { | ||||
|             break; | ||||
|         } | ||||
|  | ||||
|         // Advance. | ||||
|         ++it; | ||||
|     } | ||||
|  | ||||
|     // Check that we re-mapped precisely the page group. | ||||
|     ASSERT((++pg_it) == pg.end()); | ||||
| } | ||||
|  | ||||
| Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||||
|                             KPhysicalAddress phys_addr, bool is_pa_valid, | ||||
|                             KProcessAddress region_start, size_t region_num_pages, | ||||
| Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, | ||||
|                             bool is_pa_valid, VAddr region_start, size_t region_num_pages, | ||||
|                             KMemoryState state, KMemoryPermission perm) { | ||||
|     ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); | ||||
|  | ||||
| @@ -2211,30 +2123,26 @@ Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t | ||||
|     KScopedLightLock lk(m_general_lock); | ||||
|  | ||||
|     // Find a random address to map at. | ||||
|     KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, | ||||
|                                               0, this->GetNumGuardPages()); | ||||
|     VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, | ||||
|                                     this->GetNumGuardPages()); | ||||
|     R_UNLESS(addr != 0, ResultOutOfMemory); | ||||
|     ASSERT(Common::IsAligned(addr, alignment)); | ||||
|     ASSERT(this->CanContain(addr, num_pages * PageSize, state)); | ||||
|     ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, | ||||
|                                   KMemoryPermission::None, KMemoryPermission::None, | ||||
|                                   KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess); | ||||
|                                   KMemoryAttribute::None, KMemoryAttribute::None) | ||||
|                .IsSuccess()); | ||||
|  | ||||
|     // Create an update allocator. | ||||
|     Result allocator_result; | ||||
|     Result allocator_result{ResultSuccess}; | ||||
|     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||||
|                                                  m_memory_block_slab_manager); | ||||
|     R_TRY(allocator_result); | ||||
|  | ||||
|     // We're going to perform an update, so create a helper. | ||||
|     KScopedPageTableUpdater updater(this); | ||||
|  | ||||
|     // Perform mapping operation. | ||||
|     if (is_pa_valid) { | ||||
|         const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||||
|         R_TRY(this->Operate(addr, num_pages, properties.perm, OperationType::Map, phys_addr)); | ||||
|         R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr)); | ||||
|     } else { | ||||
|         R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm)); | ||||
|         UNIMPLEMENTED(); | ||||
|     } | ||||
|  | ||||
|     // Update the blocks. | ||||
| @@ -2247,45 +2155,28 @@ Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t | ||||
|     R_SUCCEED(); | ||||
| } | ||||
|  | ||||
| Result KPageTable::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, | ||||
|                             KMemoryPermission perm) { | ||||
|     // Check that the map is in range. | ||||
|     const size_t size = num_pages * PageSize; | ||||
|     R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); | ||||
| Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { | ||||
|     ASSERT(this->IsLockedByCurrentThread()); | ||||
|  | ||||
|     // Lock the table. | ||||
|     KScopedLightLock lk(m_general_lock); | ||||
|     VAddr cur_addr{addr}; | ||||
|  | ||||
|     // Check the memory state. | ||||
|     size_t num_allocator_blocks; | ||||
|     R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||||
|                                  KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||||
|                                  KMemoryPermission::None, KMemoryAttribute::None, | ||||
|                                  KMemoryAttribute::None)); | ||||
|     for (const auto& node : page_linked_list.Nodes()) { | ||||
|         if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None, | ||||
|                                       OperationType::Unmap)}; | ||||
|             result.IsError()) { | ||||
|             R_RETURN(result); | ||||
|         } | ||||
|  | ||||
|     // Create an update allocator. | ||||
|     Result allocator_result; | ||||
|     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||||
|                                                  m_memory_block_slab_manager, num_allocator_blocks); | ||||
|     R_TRY(allocator_result); | ||||
|  | ||||
|     // We're going to perform an update, so create a helper. | ||||
|     KScopedPageTableUpdater updater(this); | ||||
|  | ||||
|     // Map the pages. | ||||
|     R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm)); | ||||
|  | ||||
|     // Update the blocks. | ||||
|     m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, | ||||
|                                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||||
|                                   KMemoryBlockDisableMergeAttribute::None); | ||||
|         cur_addr += node.GetNumPages() * PageSize; | ||||
|     } | ||||
|  | ||||
|     R_SUCCEED(); | ||||
| } | ||||
|  | ||||
| Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) { | ||||
| Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) { | ||||
|     // Check that the unmap is in range. | ||||
|     const size_t size = num_pages * PageSize; | ||||
|     const size_t num_pages{page_linked_list.GetNumPages()}; | ||||
|     const size_t size{num_pages * PageSize}; | ||||
|     R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||||
|  | ||||
|     // Lock the table. | ||||
| @@ -2299,18 +2190,13 @@ Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemory | ||||
|                                  KMemoryAttribute::None)); | ||||
|  | ||||
|     // Create an update allocator. | ||||
|     Result allocator_result; | ||||
|     Result allocator_result{ResultSuccess}; | ||||
|     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||||
|                                                  m_memory_block_slab_manager, num_allocator_blocks); | ||||
|     R_TRY(allocator_result); | ||||
|  | ||||
|     // We're going to perform an update, so create a helper. | ||||
|     KScopedPageTableUpdater updater(this); | ||||
|  | ||||
|     // Perform the unmap. | ||||
|     const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||||
|                                               DisableMergeAttribute::None}; | ||||
|     R_TRY(this->Operate(address, num_pages, unmap_properties.perm, OperationType::Unmap)); | ||||
|     R_TRY(UnmapPages(address, page_linked_list)); | ||||
|  | ||||
|     // Update the blocks. | ||||
|     m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, | ||||
| @@ -2321,130 +2207,29 @@ Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemory | ||||
|     R_SUCCEED(); | ||||
| } | ||||
|  | ||||
| Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg, | ||||
|                                 KProcessAddress region_start, size_t region_num_pages, | ||||
|                                 KMemoryState state, KMemoryPermission perm) { | ||||
|     ASSERT(!this->IsLockedByCurrentThread()); | ||||
|  | ||||
|     // Ensure this is a valid map request. | ||||
|     const size_t num_pages = pg.GetNumPages(); | ||||
|     R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), | ||||
|              ResultInvalidCurrentMemory); | ||||
|     R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); | ||||
|  | ||||
|     // Lock the table. | ||||
|     KScopedLightLock lk(m_general_lock); | ||||
|  | ||||
|     // Find a random address to map at. | ||||
|     KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize, | ||||
|                                               0, this->GetNumGuardPages()); | ||||
|     R_UNLESS(addr != 0, ResultOutOfMemory); | ||||
|     ASSERT(this->CanContain(addr, num_pages * PageSize, state)); | ||||
|     ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, | ||||
|                                   KMemoryPermission::None, KMemoryPermission::None, | ||||
|                                   KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess); | ||||
|  | ||||
|     // Create an update allocator. | ||||
|     Result allocator_result; | ||||
|     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||||
|                                                  m_memory_block_slab_manager); | ||||
|     R_TRY(allocator_result); | ||||
|  | ||||
|     // We're going to perform an update, so create a helper. | ||||
|     KScopedPageTableUpdater updater(this); | ||||
|  | ||||
|     // Perform mapping operation. | ||||
|     const KPageProperties properties = {perm, state == KMemoryState::Io, false, | ||||
|                                         DisableMergeAttribute::DisableHead}; | ||||
|     R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); | ||||
|  | ||||
|     // Update the blocks. | ||||
|     m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||||
|                                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||||
|                                   KMemoryBlockDisableMergeAttribute::None); | ||||
|  | ||||
|     // We successfully mapped the pages. | ||||
|     *out_addr = addr; | ||||
|     R_SUCCEED(); | ||||
| } | ||||
|  | ||||
| Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state, | ||||
|                                 KMemoryPermission perm) { | ||||
|     ASSERT(!this->IsLockedByCurrentThread()); | ||||
|  | ||||
|     // Ensure this is a valid map request. | ||||
|     const size_t num_pages = pg.GetNumPages(); | ||||
| Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) { | ||||
|     // Check that the unmap is in range. | ||||
|     const size_t size = num_pages * PageSize; | ||||
|     R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); | ||||
|     R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||||
|  | ||||
|     // Lock the table. | ||||
|     KScopedLightLock lk(m_general_lock); | ||||
|  | ||||
|     // Check if state allows us to map. | ||||
|     size_t num_allocator_blocks; | ||||
|     R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size, | ||||
|                                  KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||||
|                                  KMemoryPermission::None, KMemoryAttribute::None, | ||||
|                                  KMemoryAttribute::None)); | ||||
|  | ||||
|     // Create an update allocator. | ||||
|     Result allocator_result; | ||||
|     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||||
|                                                  m_memory_block_slab_manager, num_allocator_blocks); | ||||
|     R_TRY(allocator_result); | ||||
|  | ||||
|     // We're going to perform an update, so create a helper. | ||||
|     KScopedPageTableUpdater updater(this); | ||||
|  | ||||
|     // Perform mapping operation. | ||||
|     const KPageProperties properties = {perm, state == KMemoryState::Io, false, | ||||
|                                         DisableMergeAttribute::DisableHead}; | ||||
|     R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); | ||||
|  | ||||
|     // Update the blocks. | ||||
|     m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||||
|                                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||||
|                                   KMemoryBlockDisableMergeAttribute::None); | ||||
|  | ||||
|     // We successfully mapped the pages. | ||||
|     R_SUCCEED(); | ||||
| } | ||||
|  | ||||
| Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, | ||||
|                                   KMemoryState state) { | ||||
|     ASSERT(!this->IsLockedByCurrentThread()); | ||||
|  | ||||
|     // Ensure this is a valid unmap request. | ||||
|     const size_t num_pages = pg.GetNumPages(); | ||||
|     const size_t size = num_pages * PageSize; | ||||
|     R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); | ||||
|  | ||||
|     // Lock the table. | ||||
|     KScopedLightLock lk(m_general_lock); | ||||
|  | ||||
|     // Check if state allows us to unmap. | ||||
|     size_t num_allocator_blocks; | ||||
|     // Check the memory state. | ||||
|     size_t num_allocator_blocks{}; | ||||
|     R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||||
|                                  KMemoryState::All, state, KMemoryPermission::None, | ||||
|                                  KMemoryPermission::None, KMemoryAttribute::All, | ||||
|                                  KMemoryAttribute::None)); | ||||
|  | ||||
|     // Check that the page group is valid. | ||||
|     R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory); | ||||
|  | ||||
|     // Create an update allocator. | ||||
|     Result allocator_result; | ||||
|     Result allocator_result{ResultSuccess}; | ||||
|     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||||
|                                                  m_memory_block_slab_manager, num_allocator_blocks); | ||||
|     R_TRY(allocator_result); | ||||
|  | ||||
|     // We're going to perform an update, so create a helper. | ||||
|     KScopedPageTableUpdater updater(this); | ||||
|  | ||||
|     // Perform unmapping operation. | ||||
|     const KPageProperties properties = {KMemoryPermission::None, false, false, | ||||
|                                         DisableMergeAttribute::None}; | ||||
|     R_TRY(this->Operate(address, num_pages, properties.perm, OperationType::Unmap)); | ||||
|     // Perform the unmap. | ||||
|     R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap)); | ||||
|  | ||||
|     // Update the blocks. | ||||
|     m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, | ||||
| @@ -2742,13 +2527,13 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) { | ||||
|     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||||
|  | ||||
|     // Allocate pages for the heap extension. | ||||
|     KPageGroup pg{m_kernel, m_block_info_manager}; | ||||
|     KPageGroup pg; | ||||
|     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( | ||||
|         &pg, allocation_size / PageSize, | ||||
|         KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option))); | ||||
|  | ||||
|     // Clear all the newly allocated pages. | ||||
|     for (const auto& it : pg) { | ||||
|     for (const auto& it : pg.Nodes()) { | ||||
|         std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value, | ||||
|                     it.GetSize()); | ||||
|     } | ||||
| @@ -2804,6 +2589,42 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) { | ||||
|     } | ||||
| } | ||||
|  | ||||
| ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align, | ||||
|                                                   bool is_map_only, VAddr region_start, | ||||
|                                                   size_t region_num_pages, KMemoryState state, | ||||
|                                                   KMemoryPermission perm, PAddr map_addr) { | ||||
|     KScopedLightLock lk(m_general_lock); | ||||
|  | ||||
|     R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state), | ||||
|              ResultInvalidCurrentMemory); | ||||
|     R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory); | ||||
|     const VAddr addr{ | ||||
|         AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)}; | ||||
|     R_UNLESS(addr, ResultOutOfMemory); | ||||
|  | ||||
|     // Create an update allocator. | ||||
|     Result allocator_result{ResultSuccess}; | ||||
|     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||||
|                                                  m_memory_block_slab_manager); | ||||
|  | ||||
|     if (is_map_only) { | ||||
|         R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); | ||||
|     } else { | ||||
|         KPageGroup page_group; | ||||
|         R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess( | ||||
|             &page_group, needed_num_pages, | ||||
|             KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); | ||||
|         R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); | ||||
|     } | ||||
|  | ||||
|     // Update the blocks. | ||||
|     m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm, | ||||
|                                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||||
|                                   KMemoryBlockDisableMergeAttribute::None); | ||||
|  | ||||
|     return addr; | ||||
| } | ||||
|  | ||||
| Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size, | ||||
|                                                 KMemoryPermission perm, bool is_aligned, | ||||
|                                                 bool check_heap) { | ||||
| @@ -2974,28 +2795,19 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_ | ||||
|     ASSERT(num_pages > 0); | ||||
|     ASSERT(num_pages == page_group.GetNumPages()); | ||||
|  | ||||
|     switch (operation) { | ||||
|     case OperationType::MapGroup: { | ||||
|         // We want to maintain a new reference to every page in the group. | ||||
|         KScopedPageGroup spg(page_group); | ||||
|     for (const auto& node : page_group.Nodes()) { | ||||
|         const size_t size{node.GetNumPages() * PageSize}; | ||||
|  | ||||
|         for (const auto& node : page_group) { | ||||
|             const size_t size{node.GetNumPages() * PageSize}; | ||||
|  | ||||
|             // Map the pages. | ||||
|         switch (operation) { | ||||
|         case OperationType::MapGroup: | ||||
|             m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress()); | ||||
|  | ||||
|             addr += size; | ||||
|             break; | ||||
|         default: | ||||
|             ASSERT(false); | ||||
|             break; | ||||
|         } | ||||
|  | ||||
|         // We succeeded! We want to persist the reference to the pages. | ||||
|         spg.CancelClose(); | ||||
|  | ||||
|         break; | ||||
|     } | ||||
|     default: | ||||
|         ASSERT(false); | ||||
|         break; | ||||
|         addr += size; | ||||
|     } | ||||
|  | ||||
|     R_SUCCEED(); | ||||
| @@ -3010,29 +2822,13 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, | ||||
|     ASSERT(ContainsPages(addr, num_pages)); | ||||
|  | ||||
|     switch (operation) { | ||||
|     case OperationType::Unmap: { | ||||
|         // Ensure that any pages we track close on exit. | ||||
|         KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()}; | ||||
|         SCOPE_EXIT({ pages_to_close.CloseAndReset(); }); | ||||
|  | ||||
|         this->AddRegionToPages(addr, num_pages, pages_to_close); | ||||
|     case OperationType::Unmap: | ||||
|         m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize); | ||||
|         break; | ||||
|     } | ||||
|     case OperationType::MapFirst: | ||||
|     case OperationType::Map: { | ||||
|         ASSERT(map_addr); | ||||
|         ASSERT(Common::IsAligned(map_addr, PageSize)); | ||||
|         m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); | ||||
|  | ||||
|         // Open references to pages, if we should. | ||||
|         if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) { | ||||
|             if (operation == OperationType::MapFirst) { | ||||
|                 m_kernel.MemoryManager().OpenFirst(map_addr, num_pages); | ||||
|             } else { | ||||
|                 m_kernel.MemoryManager().Open(map_addr, num_pages); | ||||
|             } | ||||
|         } | ||||
|         break; | ||||
|     } | ||||
|     case OperationType::Separate: { | ||||
|   | ||||
| @@ -24,36 +24,12 @@ class System; | ||||
|  | ||||
| namespace Kernel { | ||||
|  | ||||
| enum class DisableMergeAttribute : u8 { | ||||
|     None = (0U << 0), | ||||
|     DisableHead = (1U << 0), | ||||
|     DisableHeadAndBody = (1U << 1), | ||||
|     EnableHeadAndBody = (1U << 2), | ||||
|     DisableTail = (1U << 3), | ||||
|     EnableTail = (1U << 4), | ||||
|     EnableAndMergeHeadBodyTail = (1U << 5), | ||||
|     EnableHeadBodyTail = EnableHeadAndBody | EnableTail, | ||||
|     DisableHeadBodyTail = DisableHeadAndBody | DisableTail, | ||||
| }; | ||||
|  | ||||
| struct KPageProperties { | ||||
|     KMemoryPermission perm; | ||||
|     bool io; | ||||
|     bool uncached; | ||||
|     DisableMergeAttribute disable_merge_attributes; | ||||
| }; | ||||
| static_assert(std::is_trivial_v<KPageProperties>); | ||||
| static_assert(sizeof(KPageProperties) == sizeof(u32)); | ||||
|  | ||||
| class KBlockInfoManager; | ||||
| class KMemoryBlockManager; | ||||
| class KResourceLimit; | ||||
| class KSystemResource; | ||||
|  | ||||
| class KPageTable final { | ||||
| protected: | ||||
|     struct PageLinkedList; | ||||
|  | ||||
| public: | ||||
|     enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll }; | ||||
|  | ||||
| @@ -81,12 +57,27 @@ public: | ||||
|     Result UnmapPhysicalMemory(VAddr addr, size_t size); | ||||
|     Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size); | ||||
|     Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size); | ||||
|     Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state, | ||||
|                     KMemoryPermission perm); | ||||
|     Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, | ||||
|                     KMemoryState state, KMemoryPermission perm) { | ||||
|         R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, | ||||
|                                 this->GetRegionAddress(state), | ||||
|                                 this->GetRegionSize(state) / PageSize, state, perm)); | ||||
|     } | ||||
|     Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state); | ||||
|     Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state); | ||||
|     Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm); | ||||
|     KMemoryInfo QueryInfo(VAddr addr); | ||||
|     Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm); | ||||
|     Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr); | ||||
|     Result SetMaxHeapSize(size_t size); | ||||
|     Result SetHeapSize(VAddr* out, size_t size); | ||||
|     ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only, | ||||
|                                           VAddr region_start, size_t region_num_pages, | ||||
|                                           KMemoryState state, KMemoryPermission perm, | ||||
|                                           PAddr map_addr = 0); | ||||
|  | ||||
|     Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size, | ||||
|                                         KMemoryPermission perm, bool is_aligned, bool check_heap); | ||||
|     Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap); | ||||
| @@ -116,46 +107,8 @@ public: | ||||
|         return *m_page_table_impl; | ||||
|     } | ||||
|  | ||||
|     KBlockInfoManager* GetBlockInfoManager() { | ||||
|         return m_block_info_manager; | ||||
|     } | ||||
|  | ||||
|     bool CanContain(VAddr addr, size_t size, KMemoryState state) const; | ||||
|  | ||||
|     Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||||
|                     KPhysicalAddress phys_addr, KProcessAddress region_start, | ||||
|                     size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { | ||||
|         R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, | ||||
|                                 region_num_pages, state, perm)); | ||||
|     } | ||||
|  | ||||
|     Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||||
|                     KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) { | ||||
|         R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, | ||||
|                                 this->GetRegionAddress(state), | ||||
|                                 this->GetRegionSize(state) / PageSize, state, perm)); | ||||
|     } | ||||
|  | ||||
|     Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state, | ||||
|                     KMemoryPermission perm) { | ||||
|         R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false, | ||||
|                                 this->GetRegionAddress(state), | ||||
|                                 this->GetRegionSize(state) / PageSize, state, perm)); | ||||
|     } | ||||
|  | ||||
|     Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, | ||||
|                     KMemoryPermission perm); | ||||
|     Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state); | ||||
|  | ||||
|     Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg, | ||||
|                         KProcessAddress region_start, size_t region_num_pages, KMemoryState state, | ||||
|                         KMemoryPermission perm); | ||||
|     Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state, | ||||
|                         KMemoryPermission perm); | ||||
|     Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state); | ||||
|     void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, | ||||
|                         const KPageGroup& pg); | ||||
|  | ||||
| protected: | ||||
|     struct PageLinkedList { | ||||
|     private: | ||||
| @@ -209,9 +162,11 @@ private: | ||||
|     static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = | ||||
|         KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; | ||||
|  | ||||
|     Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||||
|                     KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, | ||||
|                     size_t region_num_pages, KMemoryState state, KMemoryPermission perm); | ||||
|     Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm); | ||||
|     Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, | ||||
|                     bool is_pa_valid, VAddr region_start, size_t region_num_pages, | ||||
|                     KMemoryState state, KMemoryPermission perm); | ||||
|     Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list); | ||||
|     bool IsRegionContiguous(VAddr addr, u64 size) const; | ||||
|     void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list); | ||||
|     KMemoryInfo QueryInfoImpl(VAddr addr); | ||||
| @@ -306,10 +261,9 @@ private: | ||||
|     void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address, | ||||
|                                                  size_t size, KMemoryPermission prot_perm); | ||||
|  | ||||
|     Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address, | ||||
|                                    size_t num_pages, KMemoryPermission perm); | ||||
|     Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address, | ||||
|                             const KPageGroup& pg, const KPageProperties properties, bool reuse_ll); | ||||
|     // HACK: These will be removed once we automatically manage page reference counts. | ||||
|     void HACK_OpenPages(PAddr phys_addr, size_t num_pages); | ||||
|     void HACK_ClosePages(VAddr virt_addr, size_t num_pages); | ||||
|  | ||||
|     mutable KLightLock m_general_lock; | ||||
|     mutable KLightLock m_map_physical_memory_lock; | ||||
| @@ -534,7 +488,6 @@ private: | ||||
|     std::unique_ptr<Common::PageTable> m_page_table_impl; | ||||
|  | ||||
|     Core::System& m_system; | ||||
|     KernelCore& m_kernel; | ||||
| }; | ||||
|  | ||||
| } // namespace Kernel | ||||
|   | ||||
| @@ -417,8 +417,9 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | ||||
| } | ||||
|  | ||||
| void KProcess::Run(s32 main_thread_priority, u64 stack_size) { | ||||
|     ASSERT(AllocateMainThreadStack(stack_size) == ResultSuccess); | ||||
|     AllocateMainThreadStack(stack_size); | ||||
|     resource_limit->Reserve(LimitableResource::ThreadCountMax, 1); | ||||
|     resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, main_thread_stack_size); | ||||
|  | ||||
|     const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; | ||||
|     ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); | ||||
| @@ -674,28 +675,20 @@ void KProcess::ChangeState(State new_state) { | ||||
| } | ||||
|  | ||||
| Result KProcess::AllocateMainThreadStack(std::size_t stack_size) { | ||||
|     // Ensure that we haven't already allocated stack. | ||||
|     ASSERT(main_thread_stack_size == 0); | ||||
|     ASSERT(stack_size); | ||||
|  | ||||
|     // Ensure that we're allocating a valid stack. | ||||
|     stack_size = Common::AlignUp(stack_size, PageSize); | ||||
|     // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory); | ||||
|     R_UNLESS(stack_size + image_size >= image_size, ResultOutOfMemory); | ||||
|     // The kernel always ensures that the given stack size is page aligned. | ||||
|     main_thread_stack_size = Common::AlignUp(stack_size, PageSize); | ||||
|  | ||||
|     // Place a tentative reservation of memory for our new stack. | ||||
|     KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax, | ||||
|                                                stack_size); | ||||
|     R_UNLESS(mem_reservation.Succeeded(), ResultLimitReached); | ||||
|     const VAddr start{page_table.GetStackRegionStart()}; | ||||
|     const std::size_t size{page_table.GetStackRegionEnd() - start}; | ||||
|  | ||||
|     // Allocate and map our stack. | ||||
|     if (stack_size) { | ||||
|         KProcessAddress stack_bottom; | ||||
|         R_TRY(page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, | ||||
|                                   KMemoryState::Stack, KMemoryPermission::UserReadWrite)); | ||||
|     CASCADE_RESULT(main_thread_stack_top, | ||||
|                    page_table.AllocateAndMapMemory( | ||||
|                        main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize, | ||||
|                        KMemoryState::Stack, KMemoryPermission::UserReadWrite)); | ||||
|  | ||||
|         main_thread_stack_top = stack_bottom + stack_size; | ||||
|         main_thread_stack_size = stack_size; | ||||
|     } | ||||
|     main_thread_stack_top += main_thread_stack_size; | ||||
|  | ||||
|     R_SUCCEED(); | ||||
| } | ||||
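As a quick illustration of the stack sizing in AllocateMainThreadStack: the requested size is rounded up to whole pages, and the recorded stack top is the first byte past the mapped region. A small sketch with made-up numbers (PageSize here and the mapping address are illustrative, not real process values):

```cpp
// Illustrative values only; not a real process layout.
#include <cstdint>

constexpr std::uint64_t PageSize = 0x1000;

constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
    return (value + align - 1) & ~(align - 1);
}

int main() {
    const std::uint64_t requested = 0x1234;                        // not page aligned
    const std::uint64_t stack_size = AlignUp(requested, PageSize); // 0x2000

    const std::uint64_t stack_bottom = 0x8000000;                  // hypothetical mapping address
    const std::uint64_t stack_top = stack_bottom + stack_size;     // stack grows down from here

    return stack_top == 0x8002000 ? 0 : 1;
}
```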
|   | ||||
| @@ -13,7 +13,10 @@ | ||||
| namespace Kernel { | ||||
|  | ||||
| KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} | ||||
| KSharedMemory::~KSharedMemory() = default; | ||||
|  | ||||
| KSharedMemory::~KSharedMemory() { | ||||
|     kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size); | ||||
| } | ||||
|  | ||||
| Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, | ||||
|                                  Svc::MemoryPermission owner_permission_, | ||||
| @@ -46,8 +49,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o | ||||
|     R_UNLESS(physical_address != 0, ResultOutOfMemory); | ||||
|  | ||||
|     //! Insert the result into our page group. | ||||
|     page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager()); | ||||
|     page_group->AddBlock(physical_address, num_pages); | ||||
|     page_group.emplace(physical_address, num_pages); | ||||
|  | ||||
|     // Commit our reservation. | ||||
|     memory_reservation.Commit(); | ||||
| @@ -60,7 +62,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o | ||||
|     is_initialized = true; | ||||
|  | ||||
|     // Clear all pages in the memory. | ||||
|     for (const auto& block : *page_group) { | ||||
|     for (const auto& block : page_group->Nodes()) { | ||||
|         std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize()); | ||||
|     } | ||||
|  | ||||
| @@ -69,8 +71,13 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o | ||||
|  | ||||
| void KSharedMemory::Finalize() { | ||||
|     // Close and finalize the page group. | ||||
|     page_group->Close(); | ||||
|     page_group->Finalize(); | ||||
|     // page_group->Close(); | ||||
|     // page_group->Finalize(); | ||||
|  | ||||
|     //! HACK: Manually close. | ||||
|     for (const auto& block : page_group->Nodes()) { | ||||
|         kernel.MemoryManager().Close(block.GetAddress(), block.GetNumPages()); | ||||
|     } | ||||
|  | ||||
|     // Release the memory reservation. | ||||
|     resource_limit->Release(LimitableResource::PhysicalMemoryMax, size); | ||||
| @@ -94,15 +101,15 @@ Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t m | ||||
|         R_UNLESS(map_perm == test_perm, ResultInvalidNewMemoryPermission); | ||||
|     } | ||||
|  | ||||
|     return target_process.PageTable().MapPageGroup(address, *page_group, KMemoryState::Shared, | ||||
|                                                    ConvertToKMemoryPermission(map_perm)); | ||||
|     return target_process.PageTable().MapPages(address, *page_group, KMemoryState::Shared, | ||||
|                                                ConvertToKMemoryPermission(map_perm)); | ||||
| } | ||||
|  | ||||
| Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) { | ||||
|     // Validate the size. | ||||
|     R_UNLESS(size == unmap_size, ResultInvalidSize); | ||||
|  | ||||
|     return target_process.PageTable().UnmapPageGroup(address, *page_group, KMemoryState::Shared); | ||||
|     return target_process.PageTable().UnmapPages(address, *page_group, KMemoryState::Shared); | ||||
| } | ||||
|  | ||||
| } // namespace Kernel | ||||
|   | ||||
| @@ -14,7 +14,4 @@ constexpr std::size_t PageSize{1 << PageBits}; | ||||
|  | ||||
| using Page = std::array<u8, PageSize>; | ||||
|  | ||||
| using KPhysicalAddress = PAddr; | ||||
| using KProcessAddress = VAddr; | ||||
|  | ||||
| } // namespace Kernel | ||||
|   | ||||
| @@ -1485,15 +1485,15 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p | ||||
|              ResultInvalidMemoryRegion); | ||||
|  | ||||
|     // Create a new page group. | ||||
|     KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()}; | ||||
|     KPageGroup pg; | ||||
|     R_TRY(src_pt.MakeAndOpenPageGroup( | ||||
|         std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess, | ||||
|         KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None, | ||||
|         KMemoryAttribute::All, KMemoryAttribute::None)); | ||||
|  | ||||
|     // Map the group. | ||||
|     R_TRY(dst_pt.MapPageGroup(dst_address, pg, KMemoryState::SharedCode, | ||||
|                               KMemoryPermission::UserReadWrite)); | ||||
|     R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode, | ||||
|                           KMemoryPermission::UserReadWrite)); | ||||
|  | ||||
|     return ResultSuccess; | ||||
| } | ||||
|   | ||||
| @@ -63,6 +63,8 @@ void IRS::ActivateIrsensor(Kernel::HLERequestContext& ctx) { | ||||
|     LOG_WARNING(Service_IRS, "(STUBBED) called, applet_resource_user_id={}", | ||||
|                 applet_resource_user_id); | ||||
|  | ||||
|     npad_device->SetPollingMode(Common::Input::PollingMode::IR); | ||||
|  | ||||
|     IPC::ResponseBuilder rb{ctx, 2}; | ||||
|     rb.Push(ResultSuccess); | ||||
| } | ||||
| @@ -74,6 +76,8 @@ void IRS::DeactivateIrsensor(Kernel::HLERequestContext& ctx) { | ||||
|     LOG_WARNING(Service_IRS, "(STUBBED) called, applet_resource_user_id={}", | ||||
|                 applet_resource_user_id); | ||||
|  | ||||
|     npad_device->SetPollingMode(Common::Input::PollingMode::Active); | ||||
|  | ||||
|     IPC::ResponseBuilder rb{ctx, 2}; | ||||
|     rb.Push(ResultSuccess); | ||||
| } | ||||
| @@ -525,6 +529,8 @@ void IRS::ActivateIrsensorWithFunctionLevel(Kernel::HLERequestContext& ctx) { | ||||
|     LOG_WARNING(Service_IRS, "(STUBBED) called, function_level={}, applet_resource_user_id={}", | ||||
|                 parameters.function_level.function_level, parameters.applet_resource_user_id); | ||||
|  | ||||
|     npad_device->SetPollingMode(Common::Input::PollingMode::IR); | ||||
|  | ||||
|     IPC::ResponseBuilder rb{ctx, 2}; | ||||
|     rb.Push(ResultSuccess); | ||||
| } | ||||
|   | ||||
| @@ -191,6 +191,10 @@ void Joycons::RegisterNewDevice(SDL_hid_device_info* device_info) { | ||||
|             .on_amiibo_data = {[this, port](const std::vector<u8>& amiibo_data) { | ||||
|                 OnAmiiboUpdate(port, amiibo_data); | ||||
|             }}, | ||||
|             .on_camera_data = {[this, port](const std::vector<u8>& camera_data, | ||||
|                                             Joycon::IrsResolution format) { | ||||
|                 OnCameraUpdate(port, camera_data, format); | ||||
|             }}, | ||||
|         }; | ||||
|  | ||||
|         handle->InitializeDevice(); | ||||
| @@ -265,9 +269,14 @@ Common::Input::DriverResult Joycons::SetLeds(const PadIdentifier& identifier, | ||||
|         handle->SetLedConfig(static_cast<u8>(led_config))); | ||||
| } | ||||
|  | ||||
| Common::Input::DriverResult Joycons::SetCameraFormat(const PadIdentifier& identifier_, | ||||
| Common::Input::DriverResult Joycons::SetCameraFormat(const PadIdentifier& identifier, | ||||
|                                                      Common::Input::CameraFormat camera_format) { | ||||
|     return Common::Input::DriverResult::NotSupported; | ||||
|     auto handle = GetHandle(identifier); | ||||
|     if (handle == nullptr) { | ||||
|         return Common::Input::DriverResult::InvalidHandle; | ||||
|     } | ||||
|     return static_cast<Common::Input::DriverResult>(handle->SetIrsConfig( | ||||
|         Joycon::IrsMode::ImageTransfer, static_cast<Joycon::IrsResolution>(camera_format))); | ||||
| }; | ||||
|  | ||||
| Common::Input::NfcState Joycons::SupportsNfc(const PadIdentifier& identifier_) const { | ||||
| @@ -288,18 +297,16 @@ Common::Input::DriverResult Joycons::SetPollingMode(const PadIdentifier& identif | ||||
|     } | ||||
|  | ||||
|     switch (polling_mode) { | ||||
|     case Common::Input::PollingMode::NFC: | ||||
|         return static_cast<Common::Input::DriverResult>(handle->SetNfcMode()); | ||||
|         break; | ||||
|     case Common::Input::PollingMode::Active: | ||||
|         return static_cast<Common::Input::DriverResult>(handle->SetActiveMode()); | ||||
|         break; | ||||
|     case Common::Input::PollingMode::Pasive: | ||||
|         return static_cast<Common::Input::DriverResult>(handle->SetPasiveMode()); | ||||
|         break; | ||||
|     case Common::Input::PollingMode::IR: | ||||
|         return static_cast<Common::Input::DriverResult>(handle->SetIrMode()); | ||||
|     case Common::Input::PollingMode::NFC: | ||||
|         return static_cast<Common::Input::DriverResult>(handle->SetNfcMode()); | ||||
|     case Common::Input::PollingMode::Ring: | ||||
|         return static_cast<Common::Input::DriverResult>(handle->SetRingConMode()); | ||||
|         break; | ||||
|     default: | ||||
|         return Common::Input::DriverResult::NotSupported; | ||||
|     } | ||||
| @@ -390,6 +397,12 @@ void Joycons::OnAmiiboUpdate(std::size_t port, const std::vector<u8>& amiibo_dat | ||||
|     SetNfc(identifier, {nfc_state, amiibo_data}); | ||||
| } | ||||
|  | ||||
| void Joycons::OnCameraUpdate(std::size_t port, const std::vector<u8>& camera_data, | ||||
|                              Joycon::IrsResolution format) { | ||||
|     const auto identifier = GetIdentifier(port, Joycon::ControllerType::Right); | ||||
|     SetCamera(identifier, {static_cast<Common::Input::CameraFormat>(format), camera_data}); | ||||
| } | ||||
|  | ||||
| std::shared_ptr<Joycon::JoyconDriver> Joycons::GetHandle(PadIdentifier identifier) const { | ||||
|     auto is_handle_active = [&](std::shared_ptr<Joycon::JoyconDriver> device) { | ||||
|         if (!device) { | ||||
|   | ||||
| @@ -17,6 +17,7 @@ struct Color; | ||||
| struct MotionData; | ||||
| enum class ControllerType; | ||||
| enum class DriverResult; | ||||
| enum class IrsResolution; | ||||
| class JoyconDriver; | ||||
| } // namespace InputCommon::Joycon | ||||
|  | ||||
| @@ -35,7 +36,7 @@ public: | ||||
|     Common::Input::DriverResult SetLeds(const PadIdentifier& identifier, | ||||
|                                         const Common::Input::LedStatus& led_status) override; | ||||
|  | ||||
|     Common::Input::DriverResult SetCameraFormat(const PadIdentifier& identifier_, | ||||
|     Common::Input::DriverResult SetCameraFormat(const PadIdentifier& identifier, | ||||
|                                                 Common::Input::CameraFormat camera_format) override; | ||||
|  | ||||
|     Common::Input::NfcState SupportsNfc(const PadIdentifier& identifier_) const override; | ||||
| @@ -81,6 +82,8 @@ private: | ||||
|                         const Joycon::MotionData& value); | ||||
|     void OnRingConUpdate(f32 ring_data); | ||||
|     void OnAmiiboUpdate(std::size_t port, const std::vector<u8>& amiibo_data); | ||||
|     void OnCameraUpdate(std::size_t port, const std::vector<u8>& camera_data, | ||||
|                         Joycon::IrsResolution format); | ||||
|  | ||||
|     /// Returns a JoyconHandle corresponding to a PadIdentifier | ||||
|     std::shared_ptr<Joycon::JoyconDriver> GetHandle(PadIdentifier identifier) const; | ||||
|   | ||||
| @@ -202,10 +202,15 @@ void JoyconDriver::OnNewData(std::span<u8> buffer) { | ||||
|         .min_value = ring_calibration.min_value, | ||||
|     }; | ||||
|  | ||||
|     if (irs_protocol->IsEnabled()) { | ||||
|         irs_protocol->RequestImage(buffer); | ||||
|         joycon_poller->UpdateCamera(irs_protocol->GetImage(), irs_protocol->GetIrsFormat()); | ||||
|     } | ||||
|  | ||||
|     if (nfc_protocol->IsEnabled()) { | ||||
|         if (amiibo_detected) { | ||||
|             if (!nfc_protocol->HasAmiibo()) { | ||||
|                 joycon_poller->updateAmiibo({}); | ||||
|                 joycon_poller->UpdateAmiibo({}); | ||||
|                 amiibo_detected = false; | ||||
|                 return; | ||||
|             } | ||||
| @@ -215,7 +220,7 @@ void JoyconDriver::OnNewData(std::span<u8> buffer) { | ||||
|             std::vector<u8> data(0x21C); | ||||
|             const auto result = nfc_protocol->ScanAmiibo(data); | ||||
|             if (result == DriverResult::Success) { | ||||
|                 joycon_poller->updateAmiibo(data); | ||||
|                 joycon_poller->UpdateAmiibo(data); | ||||
|                 amiibo_detected = true; | ||||
|             } | ||||
|         } | ||||
| @@ -391,12 +396,24 @@ DriverResult JoyconDriver::SetLedConfig(u8 led_pattern) { | ||||
|     return generic_protocol->SetLedPattern(led_pattern); | ||||
| } | ||||
|  | ||||
| DriverResult JoyconDriver::SetIrsConfig(IrsMode mode_, IrsResolution format_) { | ||||
|     std::scoped_lock lock{mutex}; | ||||
|     if (disable_input_thread) { | ||||
|         return DriverResult::HandleInUse; | ||||
|     } | ||||
|     disable_input_thread = true; | ||||
|     const auto result = irs_protocol->SetIrsConfig(mode_, format_); | ||||
|     disable_input_thread = false; | ||||
|     return result; | ||||
| } | ||||
|  | ||||
| DriverResult JoyconDriver::SetPasiveMode() { | ||||
|     std::scoped_lock lock{mutex}; | ||||
|     motion_enabled = false; | ||||
|     hidbus_enabled = false; | ||||
|     nfc_enabled = false; | ||||
|     passive_enabled = true; | ||||
|     irs_enabled = false; | ||||
|     return SetPollingMode(); | ||||
| } | ||||
|  | ||||
| @@ -406,6 +423,22 @@ DriverResult JoyconDriver::SetActiveMode() { | ||||
|     hidbus_enabled = false; | ||||
|     nfc_enabled = false; | ||||
|     passive_enabled = false; | ||||
|     irs_enabled = false; | ||||
|     return SetPollingMode(); | ||||
| } | ||||
|  | ||||
| DriverResult JoyconDriver::SetIrMode() { | ||||
|     std::scoped_lock lock{mutex}; | ||||
|  | ||||
|     if (!supported_features.irs) { | ||||
|         return DriverResult::NotSupported; | ||||
|     } | ||||
|  | ||||
|     motion_enabled = false; | ||||
|     hidbus_enabled = false; | ||||
|     nfc_enabled = false; | ||||
|     passive_enabled = false; | ||||
|     irs_enabled = true; | ||||
|     return SetPollingMode(); | ||||
| } | ||||
|  | ||||
| @@ -420,6 +453,7 @@ DriverResult JoyconDriver::SetNfcMode() { | ||||
|     hidbus_enabled = false; | ||||
|     nfc_enabled = true; | ||||
|     passive_enabled = false; | ||||
|     irs_enabled = false; | ||||
|     return SetPollingMode(); | ||||
| } | ||||
|  | ||||
| @@ -434,6 +468,7 @@ DriverResult JoyconDriver::SetRingConMode() { | ||||
|     hidbus_enabled = true; | ||||
|     nfc_enabled = false; | ||||
|     passive_enabled = false; | ||||
|     irs_enabled = false; | ||||
|  | ||||
|     const auto result = SetPollingMode(); | ||||
|  | ||||
|   | ||||
| @@ -42,8 +42,10 @@ public: | ||||
|  | ||||
|     DriverResult SetVibration(const VibrationValue& vibration); | ||||
|     DriverResult SetLedConfig(u8 led_pattern); | ||||
|     DriverResult SetIrsConfig(IrsMode mode_, IrsResolution format_); | ||||
|     DriverResult SetPasiveMode(); | ||||
|     DriverResult SetActiveMode(); | ||||
|     DriverResult SetIrMode(); | ||||
|     DriverResult SetNfcMode(); | ||||
|     DriverResult SetRingConMode(); | ||||
|  | ||||
|   | ||||
| @@ -68,6 +68,69 @@ DriverResult IrsProtocol::DisableIrs() { | ||||
|     return result; | ||||
| } | ||||
|  | ||||
| DriverResult IrsProtocol::SetIrsConfig(IrsMode mode, IrsResolution format) { | ||||
|     irs_mode = mode; | ||||
|     switch (format) { | ||||
|     case IrsResolution::Size320x240: | ||||
|         resolution_code = IrsResolutionCode::Size320x240; | ||||
|         fragments = IrsFragments::Size320x240; | ||||
|         resolution = IrsResolution::Size320x240; | ||||
|         break; | ||||
|     case IrsResolution::Size160x120: | ||||
|         resolution_code = IrsResolutionCode::Size160x120; | ||||
|         fragments = IrsFragments::Size160x120; | ||||
|         resolution = IrsResolution::Size160x120; | ||||
|         break; | ||||
|     case IrsResolution::Size80x60: | ||||
|         resolution_code = IrsResolutionCode::Size80x60; | ||||
|         fragments = IrsFragments::Size80x60; | ||||
|         resolution = IrsResolution::Size80x60; | ||||
|         break; | ||||
|     case IrsResolution::Size20x15: | ||||
|         resolution_code = IrsResolutionCode::Size20x15; | ||||
|         fragments = IrsFragments::Size20x15; | ||||
|         resolution = IrsResolution::Size20x15; | ||||
|         break; | ||||
|     case IrsResolution::Size40x30: | ||||
|     default: | ||||
|         resolution_code = IrsResolutionCode::Size40x30; | ||||
|         fragments = IrsFragments::Size40x30; | ||||
|         resolution = IrsResolution::Size40x30; | ||||
|         break; | ||||
|     } | ||||
|  | ||||
|     // Restart the IRS feature so the new configuration takes effect | ||||
|     if (is_enabled) { | ||||
|         DisableIrs(); | ||||
|         return EnableIrs(); | ||||
|     } | ||||
|  | ||||
|     return DriverResult::Success; | ||||
| } | ||||
|  | ||||
| DriverResult IrsProtocol::RequestImage(std::span<u8> buffer) { | ||||
|     const u8 next_packet_fragment = | ||||
|         static_cast<u8>((packet_fragment + 1) % (static_cast<u8>(fragments) + 1)); | ||||
|  | ||||
|     if (buffer[0] == 0x31 && buffer[49] == 0x03) { | ||||
|         u8 new_packet_fragment = buffer[52]; | ||||
|         if (new_packet_fragment == next_packet_fragment) { | ||||
|             packet_fragment = next_packet_fragment; | ||||
|             memcpy(buf_image.data() + (300 * packet_fragment), buffer.data() + 59, 300); | ||||
|  | ||||
|             return RequestFrame(packet_fragment); | ||||
|         } | ||||
|  | ||||
|         if (new_packet_fragment == packet_fragment) { | ||||
|             return RequestFrame(packet_fragment); | ||||
|         } | ||||
|  | ||||
|         return ResendFrame(next_packet_fragment); | ||||
|     } | ||||
|  | ||||
|     return RequestFrame(packet_fragment); | ||||
| } | ||||
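To make the acknowledge/resend flow in RequestImage easier to follow, here is a self-contained sketch of the same decision logic. The byte offsets (0x31 report ID, MCU report type at offset 49, fragment number at offset 52, 300 pixel bytes starting at offset 59) are taken from the code above; the Reassembler type and Ack enum are hypothetical stand-ins, and the sketch assumes a full-size input report.

```cpp
// Sketch of the fragment handling in IrsProtocol::RequestImage (hypothetical types).
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <span>
#include <vector>

enum class Ack {
    Next,    // fragment accepted: ack it so the Joy-Con sends the following one
    Repeat,  // duplicate or unrelated report: re-ack the last fragment
    Resend,  // a fragment was skipped: ask for the expected one again
};

struct Reassembler {
    std::uint8_t last_fragment = 0;
    std::uint8_t total_fragments = 0x03;  // IrsFragments value = index of the last fragment
    std::vector<std::uint8_t> image =
        std::vector<std::uint8_t>((static_cast<std::size_t>(total_fragments) + 1) * 300);

    // Expects a full 0x31 input report (at least 359 bytes).
    Ack OnInputReport(std::span<const std::uint8_t> buffer) {
        const auto expected =
            static_cast<std::uint8_t>((last_fragment + 1) % (total_fragments + 1));

        if (buffer[0] != 0x31 || buffer[49] != 0x03) {
            return Ack::Repeat;  // not an IR MCU report
        }

        const std::uint8_t received = buffer[52];
        if (received == expected) {
            // In-order fragment: copy its 300 bytes of pixel data into place.
            last_fragment = received;
            std::memcpy(image.data() + 300 * static_cast<std::size_t>(received),
                        buffer.data() + 59, 300);
            return Ack::Next;
        }
        if (received == last_fragment) {
            return Ack::Repeat;  // duplicate of the last fragment
        }
        return Ack::Resend;      // gap detected
    }
};
```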
|  | ||||
| DriverResult IrsProtocol::ConfigureIrs() { | ||||
|     LOG_DEBUG(Input, "Configure IRS"); | ||||
|     constexpr std::size_t max_tries = 28; | ||||
| @@ -78,11 +141,12 @@ DriverResult IrsProtocol::ConfigureIrs() { | ||||
|         .command = MCUCommand::ConfigureIR, | ||||
|         .sub_command = MCUSubCommand::SetDeviceMode, | ||||
|         .irs_mode = IrsMode::ImageTransfer, | ||||
|         .number_of_fragments = 0x3, | ||||
|         .number_of_fragments = fragments, | ||||
|         .mcu_major_version = 0x0500, | ||||
|         .mcu_minor_version = 0x1800, | ||||
|         .crc = {}, | ||||
|     }; | ||||
|     buf_image.resize((static_cast<u8>(fragments) + 1) * 300); | ||||
|  | ||||
|     std::vector<u8> request_data(sizeof(IrsConfigure)); | ||||
|     memcpy(request_data.data(), &irs_configuration, sizeof(IrsConfigure)); | ||||
| @@ -102,7 +166,7 @@ DriverResult IrsProtocol::ConfigureIrs() { | ||||
| } | ||||
|  | ||||
| DriverResult IrsProtocol::WriteRegistersStep1() { | ||||
|     LOG_DEBUG(Input, "Configure IRS"); | ||||
|     LOG_DEBUG(Input, "WriteRegistersStep1"); | ||||
|     DriverResult result{DriverResult::Success}; | ||||
|     constexpr std::size_t max_tries = 28; | ||||
|     std::vector<u8> output; | ||||
| @@ -114,15 +178,15 @@ DriverResult IrsProtocol::WriteRegistersStep1() { | ||||
|         .number_of_registers = 0x9, | ||||
|         .registers = | ||||
|             { | ||||
|                 IrsRegister{0x2e00, resolution}, | ||||
|                 {0x3001, static_cast<u8>(exposure & 0xff)}, | ||||
|                 {0x3101, static_cast<u8>(exposure >> 8)}, | ||||
|                 {0x3201, 0x00}, | ||||
|                 {0x1000, leds}, | ||||
|                 {0x2e01, static_cast<u8>((digital_gain & 0x0f) << 4)}, | ||||
|                 {0x2f01, static_cast<u8>((digital_gain & 0xf0) >> 4)}, | ||||
|                 {0x0e00, ex_light_filter}, | ||||
|                 {0x4301, 0xc8}, | ||||
|                 IrsRegister{IrRegistersAddress::Resolution, static_cast<u8>(resolution_code)}, | ||||
|                 {IrRegistersAddress::ExposureLSB, static_cast<u8>(exposure & 0xff)}, | ||||
|                 {IrRegistersAddress::ExposureMSB, static_cast<u8>(exposure >> 8)}, | ||||
|                 {IrRegistersAddress::ExposureTime, 0x00}, | ||||
|                 {IrRegistersAddress::Leds, static_cast<u8>(leds)}, | ||||
|                 {IrRegistersAddress::DigitalGainLSB, static_cast<u8>((digital_gain & 0x0f) << 4)}, | ||||
|                 {IrRegistersAddress::DigitalGainMSB, static_cast<u8>((digital_gain & 0xf0) >> 4)}, | ||||
|                 {IrRegistersAddress::LedFilter, static_cast<u8>(led_filter)}, | ||||
|                 {IrRegistersAddress::WhitePixelThreshold, 0xc8}, | ||||
|             }, | ||||
|         .crc = {}, | ||||
|     }; | ||||
| @@ -162,7 +226,7 @@ DriverResult IrsProtocol::WriteRegistersStep1() { | ||||
| } | ||||
|  | ||||
| DriverResult IrsProtocol::WriteRegistersStep2() { | ||||
|     LOG_DEBUG(Input, "Configure IRS"); | ||||
|     LOG_DEBUG(Input, "WriteRegistersStep2"); | ||||
|     constexpr std::size_t max_tries = 28; | ||||
|     std::vector<u8> output; | ||||
|     std::size_t tries = 0; | ||||
| @@ -173,14 +237,15 @@ DriverResult IrsProtocol::WriteRegistersStep2() { | ||||
|         .number_of_registers = 0x8, | ||||
|         .registers = | ||||
|             { | ||||
|                 IrsRegister{0x1100, static_cast<u8>(led_intensity >> 8)}, | ||||
|                 {0x1200, static_cast<u8>(led_intensity & 0xff)}, | ||||
|                 {0x2d00, image_flip}, | ||||
|                 {0x6701, static_cast<u8>((denoise >> 16) & 0xff)}, | ||||
|                 {0x6801, static_cast<u8>((denoise >> 8) & 0xff)}, | ||||
|                 {0x6901, static_cast<u8>(denoise & 0xff)}, | ||||
|                 {0x0400, 0x2d}, | ||||
|                 {0x0700, 0x01}, | ||||
|                 IrsRegister{IrRegistersAddress::LedIntensitiyMSB, | ||||
|                             static_cast<u8>(led_intensity >> 8)}, | ||||
|                 {IrRegistersAddress::LedIntensitiyLSB, static_cast<u8>(led_intensity & 0xff)}, | ||||
|                 {IrRegistersAddress::ImageFlip, static_cast<u8>(image_flip)}, | ||||
|                 {IrRegistersAddress::DenoiseSmoothing, static_cast<u8>((denoise >> 16) & 0xff)}, | ||||
|                 {IrRegistersAddress::DenoiseEdge, static_cast<u8>((denoise >> 8) & 0xff)}, | ||||
|                 {IrRegistersAddress::DenoiseColor, static_cast<u8>(denoise & 0xff)}, | ||||
|                 {IrRegistersAddress::UpdateTime, 0x2d}, | ||||
|                 {IrRegistersAddress::FinalizeConfig, 0x01}, | ||||
|             }, | ||||
|         .crc = {}, | ||||
|     }; | ||||
| @@ -202,6 +267,32 @@ DriverResult IrsProtocol::WriteRegistersStep2() { | ||||
|     return DriverResult::Success; | ||||
| } | ||||
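A small worked check of the register packing used in the two WriteRegisters steps, using the default values from IrsProtocol: multi-byte settings are split across LSB/MSB (or nibble) registers before being written. The asserts below only restate the shifts already present in the code above.

```cpp
// Compile-time check of the register packing; values are the IrsProtocol defaults.
#include <cstdint>

constexpr std::uint16_t exposure = 0x2490;
static_assert(static_cast<std::uint8_t>(exposure & 0xff) == 0x90);  // ExposureLSB
static_assert(static_cast<std::uint8_t>(exposure >> 8) == 0x24);    // ExposureMSB

constexpr std::uint8_t digital_gain = 0x01;
static_assert(static_cast<std::uint8_t>((digital_gain & 0x0f) << 4) == 0x10);  // DigitalGainLSB
static_assert(static_cast<std::uint8_t>((digital_gain & 0xf0) >> 4) == 0x00);  // DigitalGainMSB

constexpr std::uint32_t denoise = 0x012344;
static_assert(static_cast<std::uint8_t>((denoise >> 16) & 0xff) == 0x01);  // DenoiseSmoothing
static_assert(static_cast<std::uint8_t>((denoise >> 8) & 0xff) == 0x23);   // DenoiseEdge
static_assert(static_cast<std::uint8_t>(denoise & 0xff) == 0x44);          // DenoiseColor

constexpr std::uint16_t led_intensity = 0x0f10;
static_assert(static_cast<std::uint8_t>(led_intensity >> 8) == 0x0f);      // LedIntensitiyMSB
static_assert(static_cast<std::uint8_t>(led_intensity & 0xff) == 0x10);    // LedIntensitiyLSB
```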
|  | ||||
| DriverResult IrsProtocol::RequestFrame(u8 frame) { | ||||
|     std::array<u8, 38> mcu_request{}; | ||||
|     mcu_request[3] = frame; | ||||
|     mcu_request[36] = CalculateMCU_CRC8(mcu_request.data(), 36); | ||||
|     mcu_request[37] = 0xFF; | ||||
|     return SendMcuCommand(SubCommand::SET_REPORT_MODE, mcu_request); | ||||
| } | ||||
|  | ||||
| DriverResult IrsProtocol::ResendFrame(u8 frame) { | ||||
|     std::array<u8, 38> mcu_request{}; | ||||
|     mcu_request[1] = 0x1; | ||||
|     mcu_request[2] = frame; | ||||
|     mcu_request[3] = 0x0; | ||||
|     mcu_request[36] = CalculateMCU_CRC8(mcu_request.data(), 36); | ||||
|     mcu_request[37] = 0xFF; | ||||
|     return SendMcuCommand(SubCommand::SET_REPORT_MODE, mcu_request); | ||||
| } | ||||
|  | ||||
| std::vector<u8> IrsProtocol::GetImage() const { | ||||
|     return buf_image; | ||||
| } | ||||
|  | ||||
| IrsResolution IrsProtocol::GetIrsFormat() const { | ||||
|     return resolution; | ||||
| } | ||||
|  | ||||
| bool IrsProtocol::IsEnabled() const { | ||||
|     return is_enabled; | ||||
| } | ||||
|   | ||||
| @@ -23,6 +23,14 @@ public: | ||||
|  | ||||
|     DriverResult DisableIrs(); | ||||
|  | ||||
|     DriverResult SetIrsConfig(IrsMode mode, IrsResolution format); | ||||
|  | ||||
|     DriverResult RequestImage(std::span<u8> buffer); | ||||
|  | ||||
|     std::vector<u8> GetImage() const; | ||||
|  | ||||
|     IrsResolution GetIrsFormat() const; | ||||
|  | ||||
|     bool IsEnabled() const; | ||||
|  | ||||
| private: | ||||
| @@ -31,16 +39,25 @@ private: | ||||
|     DriverResult WriteRegistersStep1(); | ||||
|     DriverResult WriteRegistersStep2(); | ||||
|  | ||||
|     bool is_enabled{}; | ||||
|     DriverResult RequestFrame(u8 frame); | ||||
|     DriverResult ResendFrame(u8 frame); | ||||
|  | ||||
|     u8 resolution = 0x69; | ||||
|     u8 leds = 0x00; | ||||
|     u8 ex_light_filter = 0x03; | ||||
|     u8 image_flip = 0x00; | ||||
|     u8 digital_gain = 0x01; | ||||
|     u16 exposure = 0x2490; | ||||
|     u16 led_intensity = 0x0f10; | ||||
|     u32 denoise = 0x012344; | ||||
|     IrsMode irs_mode{IrsMode::ImageTransfer}; | ||||
|     IrsResolution resolution{IrsResolution::Size40x30}; | ||||
|     IrsResolutionCode resolution_code{IrsResolutionCode::Size40x30}; | ||||
|     IrsFragments fragments{IrsFragments::Size40x30}; | ||||
|     IrLeds leds{IrLeds::BrightAndDim}; | ||||
|     IrExLedFilter led_filter{IrExLedFilter::Enabled}; | ||||
|     IrImageFlip image_flip{IrImageFlip::Normal}; | ||||
|     u8 digital_gain{0x01}; | ||||
|     u16 exposure{0x2490}; | ||||
|     u16 led_intensity{0x0f10}; | ||||
|     u32 denoise{0x012344}; | ||||
|  | ||||
|     u8 packet_fragment{}; | ||||
|     std::vector<u8> buf_image; // 8bpp greyscale image. | ||||
|  | ||||
|     bool is_enabled{}; | ||||
| }; | ||||
|  | ||||
| } // namespace InputCommon::Joycon | ||||
|   | ||||
| @@ -18,7 +18,7 @@ | ||||
|  | ||||
| namespace InputCommon::Joycon { | ||||
| constexpr u32 MaxErrorCount = 50; | ||||
| constexpr u32 MaxBufferSize = 60; | ||||
| constexpr u32 MaxBufferSize = 368; | ||||
| constexpr u32 MaxResponseSize = 49; | ||||
| constexpr u32 MaxSubCommandResponseSize = 64; | ||||
| constexpr std::array<u8, 8> DefaultVibrationBuffer{0x0, 0x1, 0x40, 0x40, 0x0, 0x1, 0x40, 0x40}; | ||||
| @@ -284,6 +284,69 @@ enum class IrsMode : u8 { | ||||
|     SilhouetteTeraImage = 0x0A, | ||||
| }; | ||||
|  | ||||
| enum class IrsResolution { | ||||
|     Size320x240, | ||||
|     Size160x120, | ||||
|     Size80x60, | ||||
|     Size40x30, | ||||
|     Size20x15, | ||||
|     None, | ||||
| }; | ||||
|  | ||||
| enum class IrsResolutionCode : u8 { | ||||
|     Size320x240 = 0x00, // Full pixel array | ||||
|     Size160x120 = 0x50, // Sensor Binning [2 x 2] | ||||
|     Size80x60 = 0x64,   // Sensor Binning [4 x 2] and Skipping [1 x 2] | ||||
|     Size40x30 = 0x69,   // Sensor Binning [4 x 2] and Skipping [2 x 4] | ||||
|     Size20x15 = 0x6A,   // Sensor Binning [4 x 2] and Skipping [4 x 4] | ||||
| }; | ||||
|  | ||||
| // Size of image divided by 300 | ||||
| enum class IrsFragments : u8 { | ||||
|     Size20x15 = 0x00, | ||||
|     Size40x30 = 0x03, | ||||
|     Size80x60 = 0x0f, | ||||
|     Size160x120 = 0x3f, | ||||
|     Size320x240 = 0xFF, | ||||
| }; | ||||
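The "divided by 300" comment is almost right: each code is one less than that quotient, presumably the zero-based index of the last ~300-byte fragment of an 8bpp frame. A small compile-time check (an editorial sketch, using yuzu's u8/u32 aliases) makes the relationship explicit:

// Editorial sketch: fragment code == (width * height / 300) - 1.
constexpr u8 ExpectedFragmentCode(u32 width, u32 height) {
    return static_cast<u8>((width * height / 300) - 1);
}

static_assert(ExpectedFragmentCode(20, 15) == static_cast<u8>(IrsFragments::Size20x15));
static_assert(ExpectedFragmentCode(40, 30) == static_cast<u8>(IrsFragments::Size40x30));
static_assert(ExpectedFragmentCode(80, 60) == static_cast<u8>(IrsFragments::Size80x60));
static_assert(ExpectedFragmentCode(160, 120) == static_cast<u8>(IrsFragments::Size160x120));
static_assert(ExpectedFragmentCode(320, 240) == static_cast<u8>(IrsFragments::Size320x240));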
|  | ||||
| enum class IrLeds : u8 { | ||||
|     BrightAndDim = 0x00, | ||||
|     Bright = 0x20, | ||||
|     Dim = 0x10, | ||||
|     None = 0x30, | ||||
| }; | ||||
|  | ||||
| enum class IrExLedFilter : u8 { | ||||
|     Disabled = 0x00, | ||||
|     Enabled = 0x03, | ||||
| }; | ||||
|  | ||||
| enum class IrImageFlip : u8 { | ||||
|     Normal = 0x00, | ||||
|     Inverted = 0x02, | ||||
| }; | ||||
|  | ||||
| enum class IrRegistersAddress : u16 { | ||||
|     UpdateTime = 0x0400, | ||||
|     FinalizeConfig = 0x0700, | ||||
|     LedFilter = 0x0e00, | ||||
|     Leds = 0x1000, | ||||
|     LedIntensitiyMSB = 0x1100, | ||||
|     LedIntensitiyLSB = 0x1200, | ||||
|     ImageFlip = 0x2d00, | ||||
|     Resolution = 0x2e00, | ||||
|     DigitalGainLSB = 0x2e01, | ||||
|     DigitalGainMSB = 0x2f01, | ||||
|     ExposureLSB = 0x3001, | ||||
|     ExposureMSB = 0x3101, | ||||
|     ExposureTime = 0x3201, | ||||
|     WhitePixelThreshold = 0x4301, | ||||
|     DenoiseSmoothing = 0x6701, | ||||
|     DenoiseEdge = 0x6801, | ||||
|     DenoiseColor = 0x6901, | ||||
| }; | ||||
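The u16 constants look like packed register/page pairs: the high byte is the register offset and the low byte the MCU page it lives on, so DenoiseSmoothing would be register 0x67 on page 0x01, which is consistent with community Joy-Con IR documentation. Under that assumption (an editorial reading, not something the commit states), two small helpers would split an address apart:

// Editorial sketch, assuming address == (register_offset << 8) | page.
constexpr u8 IrRegisterPage(IrRegistersAddress address) {
    return static_cast<u8>(static_cast<u16>(address) & 0xff);
}

constexpr u8 IrRegisterOffset(IrRegistersAddress address) {
    return static_cast<u8>(static_cast<u16>(address) >> 8);
}

static_assert(IrRegisterPage(IrRegistersAddress::DenoiseSmoothing) == 0x01);
static_assert(IrRegisterOffset(IrRegistersAddress::DenoiseSmoothing) == 0x67);
static_assert(IrRegisterPage(IrRegistersAddress::LedIntensitiyMSB) == 0x00);
static_assert(IrRegisterOffset(IrRegistersAddress::LedIntensitiyMSB) == 0x11);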
|  | ||||
| enum class DriverResult { | ||||
|     Success, | ||||
|     WrongReply, | ||||
| @@ -471,7 +534,7 @@ struct IrsConfigure { | ||||
|     MCUCommand command; | ||||
|     MCUSubCommand sub_command; | ||||
|     IrsMode irs_mode; | ||||
|     u8 number_of_fragments; | ||||
|     IrsFragments number_of_fragments; | ||||
|     u16 mcu_major_version; | ||||
|     u16 mcu_minor_version; | ||||
|     INSERT_PADDING_BYTES(0x1D); | ||||
| @@ -481,7 +544,7 @@ static_assert(sizeof(IrsConfigure) == 0x26, "IrsConfigure is an invalid size"); | ||||
|  | ||||
| #pragma pack(push, 1) | ||||
| struct IrsRegister { | ||||
|     u16 address; | ||||
|     IrRegistersAddress address; | ||||
|     u8 value; | ||||
| }; | ||||
| static_assert(sizeof(IrsRegister) == 0x3, "IrsRegister is an invalid size"); | ||||
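The pack(1) wrapper is what keeps IrsRegister at the 3 bytes the static_assert demands: with default alignment the u16 member forces 2-byte alignment and a byte of tail padding, which would break the wire layout of the register list. A short editorial illustration (the assertion holds on the common ABIs yuzu targets):

// Editorial illustration: the same members without #pragma pack(push, 1).
struct UnpackedIrsRegisterExample {
    u16 address; // 2-byte alignment adds one byte of tail padding after `value`
    u8 value;
};
static_assert(sizeof(UnpackedIrsRegisterExample) == 4); // vs. 3 when packed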
| @@ -531,6 +594,7 @@ struct JoyconCallbacks { | ||||
|     std::function<void(int, const MotionData&)> on_motion_data; | ||||
|     std::function<void(f32)> on_ring_data; | ||||
|     std::function<void(const std::vector<u8>&)> on_amiibo_data; | ||||
|     std::function<void(const std::vector<u8>&, IrsResolution)> on_camera_data; | ||||
| }; | ||||
|  | ||||
| } // namespace InputCommon::Joycon | ||||
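The new on_camera_data member follows the same pattern as the existing callbacks: the poller decodes a frame and forwards it through a std::function supplied by the frontend, together with its IrsResolution. A minimal, hypothetical wiring sketch; CameraSink and OnCameraFrame are placeholders, not names from the codebase:

// Placeholder consumer; in yuzu the data ultimately reaches the emulated
// controller's camera state, but this type is purely illustrative.
struct CameraSink {
    void OnCameraFrame(const std::vector<u8>& frame,
                       InputCommon::Joycon::IrsResolution format);
};

// Hypothetical wiring sketch for the camera callback.
InputCommon::Joycon::JoyconCallbacks MakeCallbacks(CameraSink& sink) {
    InputCommon::Joycon::JoyconCallbacks callbacks{};
    callbacks.on_camera_data = [&sink](const std::vector<u8>& frame,
                                       InputCommon::Joycon::IrsResolution format) {
        sink.OnCameraFrame(frame, format); // 8bpp greyscale buffer + resolution tag
    };
    // ... on_motion_data, on_ring_data, on_amiibo_data are wired the same way ...
    return callbacks;
}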
|   | ||||
| @@ -74,10 +74,14 @@ void JoyconPoller::UpdateColor(const Color& color) { | ||||
|     callbacks.on_color_data(color); | ||||
| } | ||||
|  | ||||
| void JoyconPoller::updateAmiibo(const std::vector<u8>& amiibo_data) { | ||||
| void JoyconPoller::UpdateAmiibo(const std::vector<u8>& amiibo_data) { | ||||
|     callbacks.on_amiibo_data(amiibo_data); | ||||
| } | ||||
|  | ||||
| void JoyconPoller::UpdateCamera(const std::vector<u8>& camera_data, IrsResolution format) { | ||||
|     callbacks.on_camera_data(camera_data, format); | ||||
| } | ||||
|  | ||||
| void JoyconPoller::UpdateRing(s16 value, const RingStatus& ring_status) { | ||||
|     float normalized_value = static_cast<float>(value - ring_status.default_value); | ||||
|     if (normalized_value > 0) { | ||||
|   | ||||
| @@ -36,7 +36,8 @@ public: | ||||
|  | ||||
|     void UpdateColor(const Color& color); | ||||
|     void UpdateRing(s16 value, const RingStatus& ring_status); | ||||
|     void updateAmiibo(const std::vector<u8>& amiibo_data); | ||||
|     void UpdateAmiibo(const std::vector<u8>& amiibo_data); | ||||
|     void UpdateCamera(const std::vector<u8>& camera_data, IrsResolution format); | ||||
|  | ||||
| private: | ||||
|     void UpdateActiveLeftPadInput(const InputReportActive& input, | ||||
|   | ||||