early-access version 3029

This commit is contained in:
pineappleEA 2022-10-16 01:00:39 +02:00
parent 0c8e3298c3
commit 7d967241e6
47 changed files with 2498 additions and 1280 deletions

View File

@@ -1,7 +1,7 @@
yuzu emulator early access
=============
This is the source code for early-access 3028.
This is the source code for early-access 3029.
## Legal Notice

View File

@@ -190,6 +190,9 @@ add_library(core STATIC
hle/kernel/k_code_memory.h
hle/kernel/k_condition_variable.cpp
hle/kernel/k_condition_variable.h
hle/kernel/k_dynamic_page_manager.h
hle/kernel/k_dynamic_resource_manager.h
hle/kernel/k_dynamic_slab_heap.h
hle/kernel/k_event.cpp
hle/kernel/k_event.h
hle/kernel/k_handle_table.cpp

View File

@@ -134,6 +134,14 @@ void ARM_Interface::Run() {
}
system.ExitDynarmicProfile();
// If the thread is scheduled for termination, exit the thread.
if (current_thread->HasDpc()) {
if (current_thread->IsTerminationRequested()) {
current_thread->Exit();
UNREACHABLE();
}
}
// Notify the debugger and go to sleep if a breakpoint was hit,
// or if the thread is unable to continue for any reason.
if (Has(hr, breakpoint) || Has(hr, no_execute)) {

View File

@@ -133,6 +133,48 @@ struct System::Impl {
: kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{},
cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {}
void Initialize(System& system) {
device_memory = std::make_unique<Core::DeviceMemory>();
is_multicore = Settings::values.use_multi_core.GetValue();
core_timing.SetMulticore(is_multicore);
core_timing.Initialize([&system]() { system.RegisterHostThread(); });
const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
const auto current_time =
std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
Settings::values.custom_rtc_differential =
Settings::values.custom_rtc.value_or(current_time) - current_time;
// Create a default fs if one doesn't already exist.
if (virtual_filesystem == nullptr)
virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
if (content_provider == nullptr)
content_provider = std::make_unique<FileSys::ContentProviderUnion>();
// Create default implementations of applets if one is not provided.
applet_manager.SetDefaultAppletsIfMissing();
is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
kernel.SetMulticore(is_multicore);
cpu_manager.SetMulticore(is_multicore);
cpu_manager.SetAsyncGpu(is_async_gpu);
}
void ReinitializeIfNecessary(System& system) {
if (is_multicore == Settings::values.use_multi_core.GetValue()) {
return;
}
LOG_DEBUG(Kernel, "Re-initializing");
is_multicore = Settings::values.use_multi_core.GetValue();
Initialize(system);
}
SystemResultStatus Run() {
std::unique_lock<std::mutex> lk(suspend_guard);
status = SystemResultStatus::Success;
@@ -178,37 +220,14 @@ struct System::Impl {
debugger = std::make_unique<Debugger>(system, port);
}
SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) {
SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) {
LOG_DEBUG(Core, "initialized OK");
device_memory = std::make_unique<Core::DeviceMemory>();
is_multicore = Settings::values.use_multi_core.GetValue();
is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
kernel.SetMulticore(is_multicore);
cpu_manager.SetMulticore(is_multicore);
cpu_manager.SetAsyncGpu(is_async_gpu);
core_timing.SetMulticore(is_multicore);
// Setting changes may require a full system reinitialization (e.g., disabling multicore).
ReinitializeIfNecessary(system);
kernel.Initialize();
cpu_manager.Initialize();
core_timing.Initialize([&system]() { system.RegisterHostThread(); });
const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
const auto current_time =
std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
Settings::values.custom_rtc_differential =
Settings::values.custom_rtc.value_or(current_time) - current_time;
// Create a default fs if one doesn't already exist.
if (virtual_filesystem == nullptr)
virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
if (content_provider == nullptr)
content_provider = std::make_unique<FileSys::ContentProviderUnion>();
/// Create default implementations of applets if one is not provided.
applet_manager.SetDefaultAppletsIfMissing();
/// Reset all glue registrations
arp_manager.ResetAll();
@@ -253,11 +272,11 @@ struct System::Impl {
return SystemResultStatus::ErrorGetLoader;
}
SystemResultStatus init_result{Init(system, emu_window)};
SystemResultStatus init_result{SetupForMainProcess(system, emu_window)};
if (init_result != SystemResultStatus::Success) {
LOG_CRITICAL(Core, "Failed to initialize system (Error {})!",
static_cast<int>(init_result));
Shutdown();
ShutdownMainProcess();
return init_result;
}
@@ -276,7 +295,7 @@ struct System::Impl {
const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
if (load_result != Loader::ResultStatus::Success) {
LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
Shutdown();
ShutdownMainProcess();
return static_cast<SystemResultStatus>(
static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result));
@@ -335,7 +354,7 @@ struct System::Impl {
return status;
}
void Shutdown() {
void ShutdownMainProcess() {
SetShuttingDown(true);
// Log last frame performance stats if game was loaded
@@ -369,7 +388,7 @@ struct System::Impl {
cheat_engine.reset();
telemetry_session.reset();
time_manager.Shutdown();
core_timing.Shutdown();
core_timing.ClearPendingEvents();
app_loader.reset();
audio_core.reset();
gpu_core.reset();
@@ -377,7 +396,6 @@ struct System::Impl {
perf_stats.reset();
kernel.Shutdown();
memory.Reset();
applet_manager.ClearAll();
if (auto room_member = room_network.GetRoomMember().lock()) {
Network::GameInfo game_info{};
@@ -520,6 +538,10 @@ const CpuManager& System::GetCpuManager() const {
return impl->cpu_manager;
}
void System::Initialize() {
impl->Initialize(*this);
}
SystemResultStatus System::Run() {
return impl->Run();
}
@@ -540,8 +562,8 @@ void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
}
void System::Shutdown() {
impl->Shutdown();
void System::ShutdownMainProcess() {
impl->ShutdownMainProcess();
}
bool System::IsShuttingDown() const {

View File

@@ -142,6 +142,12 @@ public:
System(System&&) = delete;
System& operator=(System&&) = delete;
/**
* Initializes the system
* This function will initialize core functionality used for system emulation
*/
void Initialize();
/**
* Run the OS and Application
* This function will start emulation and run the relevant devices
@@ -166,8 +172,8 @@ public:
void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
/// Shutdown the emulated system.
void Shutdown();
/// Shutdown the main emulated process.
void ShutdownMainProcess();
/// Check if the core is shutting down.
[[nodiscard]] bool IsShuttingDown() const;
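Taken with the new Initialize() declared above, the rename spells out the intended lifecycle: core state is set up once, and only the emulated process is torn down between titles. A minimal sketch of how a frontend might drive this (hypothetical call site; loader and window setup omitted):

Core::System system;
system.Initialize();          // one-time core setup (device memory, core timing, defaults)
// ... load and run a title via system.Load(...) / system.Run() ...
system.ShutdownMainProcess(); // replaces the old system.Shutdown(); core state survives
// A different title can now boot without re-creating 'system'.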

View File

@@ -40,7 +40,9 @@ struct CoreTiming::Event {
CoreTiming::CoreTiming()
: clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {}
CoreTiming::~CoreTiming() = default;
CoreTiming::~CoreTiming() {
Reset();
}
void CoreTiming::ThreadEntry(CoreTiming& instance) {
constexpr char name[] = "HostTiming";
@@ -53,6 +55,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
}
void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
Reset();
on_thread_init = std::move(on_thread_init_);
event_fifo_id = 0;
shutting_down = false;
@@ -65,17 +68,8 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
}
}
void CoreTiming::Shutdown() {
paused = true;
shutting_down = true;
pause_event.Set();
event.Set();
if (timer_thread) {
timer_thread->join();
}
ClearPendingEvents();
timer_thread.reset();
has_started = false;
void CoreTiming::ClearPendingEvents() {
event_queue.clear();
}
void CoreTiming::Pause(bool is_paused) {
@@ -196,10 +190,6 @@ u64 CoreTiming::GetClockTicks() const {
return CpuCyclesToClockCycles(ticks);
}
void CoreTiming::ClearPendingEvents() {
event_queue.clear();
}
void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
std::scoped_lock lock{basic_lock};
@@ -307,6 +297,18 @@ void CoreTiming::ThreadLoop() {
}
}
void CoreTiming::Reset() {
paused = true;
shutting_down = true;
pause_event.Set();
event.Set();
if (timer_thread) {
timer_thread->join();
}
timer_thread.reset();
has_started = false;
}
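Folding the teardown into Reset() lets both the destructor and Initialize() share it, so a CoreTiming instance can be cycled. A hedged sketch of the pattern this enables (callback bodies are illustrative):

Core::Timing::CoreTiming timing;
timing.SetMulticore(true);                              // must precede Initialize()
timing.Initialize([] { /* register host thread */ });   // Reset() runs first, joining any prior timer thread
timing.Initialize([] { /* register host thread */ });   // safe to call again after a settings change
// ~CoreTiming() also calls Reset(), so no explicit Shutdown() call remains.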
std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
if (is_multicore) {
return clock->GetTimeNS();

View File

@@ -61,19 +61,14 @@ public:
/// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
void Initialize(std::function<void()>&& on_thread_init_);
/// Tears down all timing related functionality.
void Shutdown();
/// Clear all pending events. This should ONLY be done on exit.
void ClearPendingEvents();
/// Sets if emulation is multicore or single core, must be set before Initialize
void SetMulticore(bool is_multicore_) {
is_multicore = is_multicore_;
}
/// Check if it's using host timing.
bool IsHostTiming() const {
return is_multicore;
}
/// Pauses/Unpauses the execution of the timer thread.
void Pause(bool is_paused);
@@ -136,12 +131,11 @@ public:
private:
struct Event;
/// Clear all pending events. This should ONLY be done on exit.
void ClearPendingEvents();
static void ThreadEntry(CoreTiming& instance);
void ThreadLoop();
void Reset();
std::unique_ptr<Common::WallClock> clock;
s64 global_timer = 0;

View File

@@ -31,12 +31,14 @@ public:
DramMemoryMap::Base;
}
u8* GetPointer(PAddr addr) {
return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
template <typename T>
T* GetPointer(PAddr addr) {
return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
}
const u8* GetPointer(PAddr addr) const {
return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
template <typename T>
const T* GetPointer(PAddr addr) const {
return reinterpret_cast<const T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
}
Common::HostMemory buffer;
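With the accessor templated, callers name the pointee type at the call site rather than casting the returned u8*. Illustrative call sites ('device_memory' and 'addr' are stand-ins):

u32* word = device_memory.GetPointer<u32>(addr);  // was: reinterpret_cast<u32*>(device_memory.GetPointer(addr))
void* raw = device_memory.GetPointer<void>(addr); // untyped callers now write <void> explicitly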

View File

@@ -94,8 +94,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
// TODO(bunnei): Fix this once we support the kernel virtual memory layout.
if (size > 0) {
void* backing_kernel_memory{
system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))};
void* backing_kernel_memory{system.DeviceMemory().GetPointer<void>(
TranslateSlabAddrToPhysical(memory_layout, start))};
const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
ASSERT(region != nullptr);
@@ -181,7 +181,7 @@ void InitializeKPageBufferSlabHeap(Core::System& system) {
ASSERT(slab_address != 0);
// Initialize the slabheap.
KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address),
KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
slab_size);
}

View File

@@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
// Clear the memory.
for (const auto& block : m_page_group.Nodes()) {
std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
}
// Set remaining tracking members.

View File

@@ -0,0 +1,136 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/alignment.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_page_bitmap.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
class KDynamicPageManager {
public:
class PageBuffer {
private:
u8 m_buffer[PageSize];
};
static_assert(sizeof(PageBuffer) == PageSize);
private:
KSpinLock m_lock;
KPageBitmap m_page_bitmap;
size_t m_used{};
size_t m_peak{};
size_t m_count{};
VAddr m_address{};
size_t m_size{};
// TODO(bunnei): Backed by host memory until we emulate kernel virtual address space.
std::vector<u8> m_backing_memory;
public:
KDynamicPageManager() = default;
template <typename T>
T* GetPointer(VAddr addr) {
return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
}
template <typename T>
const T* GetPointer(VAddr addr) const {
return reinterpret_cast<const T*>(m_backing_memory.data() + (addr - m_address));
}
Result Initialize(VAddr addr, size_t sz) {
// We need to have positive size.
R_UNLESS(sz > 0, ResultOutOfMemory);
m_backing_memory.resize(sz);
// Calculate management overhead.
const size_t management_size =
KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
const size_t allocatable_size = sz - management_size;
// Set tracking fields.
m_address = addr;
m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer));
m_count = allocatable_size / sizeof(PageBuffer);
R_UNLESS(m_count > 0, ResultOutOfMemory);
// Clear the management region.
u64* management_ptr = GetPointer<u64>(m_address + allocatable_size);
std::memset(management_ptr, 0, management_size);
// Initialize the bitmap.
m_page_bitmap.Initialize(management_ptr, m_count);
// Free the pages to the bitmap.
for (size_t i = 0; i < m_count; i++) {
// Ensure the freed page is all-zero.
std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);
// Set the bit for the free page.
m_page_bitmap.SetBit(i);
}
R_SUCCEED();
}
constexpr VAddr GetAddress() const {
return m_address;
}
constexpr size_t GetSize() const {
return m_size;
}
constexpr size_t GetUsed() const {
return m_used;
}
constexpr size_t GetPeak() const {
return m_peak;
}
constexpr size_t GetCount() const {
return m_count;
}
PageBuffer* Allocate() {
// Take the lock.
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
KScopedSpinLock lk(m_lock);
// Find a random free block.
s64 soffset = m_page_bitmap.FindFreeBlock(true);
if (soffset < 0) [[unlikely]] {
return nullptr;
}
const size_t offset = static_cast<size_t>(soffset);
// Update our tracking.
m_page_bitmap.ClearBit(offset);
m_peak = std::max(m_peak, (++m_used));
return GetPointer<PageBuffer>(m_address) + offset;
}
void Free(PageBuffer* pb) {
// Ensure all pages in the heap are zero.
std::memset(pb, 0, PageSize);
// Take the lock.
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
KScopedSpinLock lk(m_lock);
// Set the bit for the free page.
// Compute the offset relative to the host backing buffer, matching the
// pointers handed out by Allocate().
const size_t offset = (reinterpret_cast<uintptr_t>(pb) -
                       reinterpret_cast<uintptr_t>(m_backing_memory.data())) /
                      sizeof(PageBuffer);
m_page_bitmap.SetBit(offset);
// Decrement our used count.
--m_used;
}
};
} // namespace Kernel
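A minimal usage sketch; the base address and page count are made up for illustration (in the kernel the region comes from the system resource limits):

Kernel::KDynamicPageManager page_manager;
ASSERT(R_SUCCEEDED(page_manager.Initialize(0x10000000, 64 * Kernel::PageSize)));
auto* page = page_manager.Allocate();  // a zeroed PageSize buffer, or nullptr when the bitmap is full
if (page != nullptr) {
    page_manager.Free(page);           // re-zeroes the page and sets its bitmap bit free again
}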

View File

@@ -0,0 +1,58 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_funcs.h"
#include "core/hle/kernel/k_dynamic_slab_heap.h"
#include "core/hle/kernel/k_memory_block.h"
namespace Kernel {
template <typename T, bool ClearNode = false>
class KDynamicResourceManager {
YUZU_NON_COPYABLE(KDynamicResourceManager);
YUZU_NON_MOVEABLE(KDynamicResourceManager);
public:
using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>;
private:
KDynamicPageManager* m_page_allocator{};
DynamicSlabType* m_slab_heap{};
public:
constexpr KDynamicResourceManager() = default;
constexpr size_t GetSize() const {
return m_slab_heap->GetSize();
}
constexpr size_t GetUsed() const {
return m_slab_heap->GetUsed();
}
constexpr size_t GetPeak() const {
return m_slab_heap->GetPeak();
}
constexpr size_t GetCount() const {
return m_slab_heap->GetCount();
}
void Initialize(KDynamicPageManager* page_allocator, DynamicSlabType* slab_heap) {
m_page_allocator = page_allocator;
m_slab_heap = slab_heap;
}
T* Allocate() const {
return m_slab_heap->Allocate(m_page_allocator);
}
void Free(T* t) const {
m_slab_heap->Free(t);
}
};
class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {};
using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType;
} // namespace Kernel
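The manager is a thin façade binding a page allocator to a slab heap (defined in the next file). A hedged wiring sketch using the KMemoryBlock specialization aliased above, reusing 'page_manager' from the previous example:

Kernel::KMemoryBlockSlabHeap block_heap;       // KDynamicSlabHeap<KMemoryBlock>
Kernel::KMemoryBlockSlabManager block_manager;
block_heap.Initialize(&page_manager, /*num_objects=*/0);
block_manager.Initialize(&page_manager, &block_heap);
Kernel::KMemoryBlock* block = block_manager.Allocate();  // grows the heap from 'page_manager' on demand
block_manager.Free(block);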

View File

@@ -0,0 +1,122 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <atomic>
#include "common/common_funcs.h"
#include "core/hle/kernel/k_dynamic_page_manager.h"
#include "core/hle/kernel/k_slab_heap.h"
namespace Kernel {
template <typename T, bool ClearNode = false>
class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
YUZU_NON_COPYABLE(KDynamicSlabHeap);
YUZU_NON_MOVEABLE(KDynamicSlabHeap);
private:
using PageBuffer = KDynamicPageManager::PageBuffer;
private:
std::atomic<size_t> m_used{};
std::atomic<size_t> m_peak{};
std::atomic<size_t> m_count{};
VAddr m_address{};
size_t m_size{};
public:
constexpr KDynamicSlabHeap() = default;
constexpr VAddr GetAddress() const {
return m_address;
}
constexpr size_t GetSize() const {
return m_size;
}
constexpr size_t GetUsed() const {
return m_used.load();
}
constexpr size_t GetPeak() const {
return m_peak.load();
}
constexpr size_t GetCount() const {
return m_count.load();
}
constexpr bool IsInRange(VAddr addr) const {
return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
}
void Initialize(KDynamicPageManager* page_allocator, size_t num_objects) {
ASSERT(page_allocator != nullptr);
// Initialize members.
m_address = page_allocator->GetAddress();
m_size = page_allocator->GetSize();
// Initialize the base allocator.
KSlabHeapImpl::Initialize();
// Allocate until we have the correct number of objects.
while (m_count.load() < num_objects) {
auto* allocated = reinterpret_cast<T*>(page_allocator->Allocate());
ASSERT(allocated != nullptr);
for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
KSlabHeapImpl::Free(allocated + i);
}
m_count += sizeof(PageBuffer) / sizeof(T);
}
}
T* Allocate(KDynamicPageManager* page_allocator) {
T* allocated = static_cast<T*>(KSlabHeapImpl::Allocate());
// If we successfully allocated and we should clear the node, do so.
if constexpr (ClearNode) {
if (allocated != nullptr) [[likely]] {
reinterpret_cast<KSlabHeapImpl::Node*>(allocated)->next = nullptr;
}
}
// If we fail to allocate, try to get a new page from our next allocator.
if (allocated == nullptr) [[unlikely]] {
if (page_allocator != nullptr) {
allocated = reinterpret_cast<T*>(page_allocator->Allocate());
if (allocated != nullptr) {
// If we succeeded in getting a page, free the rest to our slab.
for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
KSlabHeapImpl::Free(allocated + i);
}
m_count += sizeof(PageBuffer) / sizeof(T);
}
}
}
if (allocated != nullptr) [[likely]] {
// Construct the object.
std::construct_at(allocated);
// Update our tracking.
const size_t used = ++m_used;
size_t peak = m_peak.load();
while (peak < used) {
if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
break;
}
}
}
return allocated;
}
void Free(T* t) {
KSlabHeapImpl::Free(t);
--m_used;
}
};
} // namespace Kernel
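The page-allocator argument to Allocate() is what separates this heap from the static KSlabHeap: passing one lets the free list grow on demand. A sketch of the two call styles (again assuming an initialized 'page_manager'):

Kernel::KDynamicSlabHeap<Kernel::KMemoryBlock> heap;
heap.Initialize(&page_manager, /*num_objects=*/64);  // pre-carve at least 64 objects
auto* fast_path = heap.Allocate(nullptr);            // free list only; nullptr when exhausted
auto* grow_path = heap.Allocate(&page_manager);      // may pull a fresh page before failing
if (fast_path != nullptr) {
    heap.Free(fast_path);
}
if (grow_path != nullptr) {
    heap.Free(grow_path);
}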

View File

@@ -11,29 +11,34 @@
namespace Kernel::KInterruptManager {
void HandleInterrupt(KernelCore& kernel, s32 core_id) {
auto* process = kernel.CurrentProcess();
if (!process) {
return;
}
// Acknowledge the interrupt.
kernel.PhysicalCore(core_id).ClearInterrupt();
auto& current_thread = GetCurrentThread(kernel);
// If the user disable count is set, we may need to pin the current thread.
if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
KScopedSchedulerLock sl{kernel};
if (auto* process = kernel.CurrentProcess(); process) {
// If the user disable count is set, we may need to pin the current thread.
if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
KScopedSchedulerLock sl{kernel};
// Pin the current thread.
process->PinCurrentThread(core_id);
// Pin the current thread.
process->PinCurrentThread(core_id);
// Set the interrupt flag for the thread.
GetCurrentThread(kernel).SetInterruptFlag();
// Set the interrupt flag for the thread.
GetCurrentThread(kernel).SetInterruptFlag();
}
}
// Request interrupt scheduling.
kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
}
void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) {
for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
if (core_mask & (1ULL << core_id)) {
kernel.PhysicalCore(core_id).Interrupt();
}
}
}
} // namespace Kernel::KInterruptManager
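The mask carries one bit per core, matching the loop over Core::Hardware::NUM_CPU_CORES; for example, interrupting cores 0 and 2 (illustrative call site):

Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, (1ULL << 0) | (1ULL << 2));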

View File

@@ -11,6 +11,8 @@ class KernelCore;
namespace KInterruptManager {
void HandleInterrupt(KernelCore& kernel, s32 core_id);
}
void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask);
} // namespace KInterruptManager
} // namespace Kernel

View File

@@ -6,6 +6,7 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_types.h"
@@ -168,9 +169,8 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per
enum class KMemoryAttribute : u8 {
None = 0x00,
Mask = 0x7F,
All = Mask,
DontCareMask = 0x80,
All = 0xFF,
UserMask = All,
Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
@@ -178,76 +178,112 @@ enum class KMemoryAttribute : u8 {
Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
SetMask = Uncached,
IpcAndDeviceMapped = IpcLocked | DeviceShared,
LockedAndIpcLocked = Locked | IpcLocked,
DeviceSharedAndUncached = DeviceShared | Uncached
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
static_assert((static_cast<u8>(KMemoryAttribute::Mask) &
static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0);
enum class KMemoryBlockDisableMergeAttribute : u8 {
None = 0,
Normal = (1u << 0),
DeviceLeft = (1u << 1),
IpcLeft = (1u << 2),
Locked = (1u << 3),
DeviceRight = (1u << 4),
AllLeft = Normal | DeviceLeft | IpcLeft | Locked,
AllRight = DeviceRight,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute);
struct KMemoryInfo {
VAddr addr{};
std::size_t size{};
KMemoryState state{};
KMemoryPermission perm{};
KMemoryAttribute attribute{};
KMemoryPermission original_perm{};
u16 ipc_lock_count{};
u16 device_use_count{};
uintptr_t m_address;
size_t m_size;
KMemoryState m_state;
u16 m_device_disable_merge_left_count;
u16 m_device_disable_merge_right_count;
u16 m_ipc_lock_count;
u16 m_device_use_count;
u16 m_ipc_disable_merge_count;
KMemoryPermission m_permission;
KMemoryAttribute m_attribute;
KMemoryPermission m_original_permission;
KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
return {
addr,
size,
static_cast<Svc::MemoryState>(state & KMemoryState::Mask),
static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask),
static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask),
ipc_lock_count,
device_use_count,
.addr = m_address,
.size = m_size,
.state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask),
.attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask),
.perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask),
.ipc_refcount = m_ipc_lock_count,
.device_refcount = m_device_use_count,
.padding = {},
};
}
constexpr VAddr GetAddress() const {
return addr;
constexpr uintptr_t GetAddress() const {
return m_address;
}
constexpr std::size_t GetSize() const {
return size;
constexpr size_t GetSize() const {
return m_size;
}
constexpr std::size_t GetNumPages() const {
return GetSize() / PageSize;
constexpr size_t GetNumPages() const {
return this->GetSize() / PageSize;
}
constexpr VAddr GetEndAddress() const {
return GetAddress() + GetSize();
constexpr uintptr_t GetEndAddress() const {
return this->GetAddress() + this->GetSize();
}
constexpr VAddr GetLastAddress() const {
return GetEndAddress() - 1;
constexpr uintptr_t GetLastAddress() const {
return this->GetEndAddress() - 1;
}
constexpr u16 GetIpcLockCount() const {
return m_ipc_lock_count;
}
constexpr u16 GetIpcDisableMergeCount() const {
return m_ipc_disable_merge_count;
}
constexpr KMemoryState GetState() const {
return state;
}
constexpr KMemoryAttribute GetAttribute() const {
return attribute;
return m_state;
}
constexpr KMemoryPermission GetPermission() const {
return perm;
return m_permission;
}
constexpr KMemoryPermission GetOriginalPermission() const {
return m_original_permission;
}
constexpr KMemoryAttribute GetAttribute() const {
return m_attribute;
}
constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
return m_disable_merge_attribute;
}
};
class KMemoryBlock final {
friend class KMemoryBlockManager;
class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
private:
VAddr addr{};
std::size_t num_pages{};
KMemoryState state{KMemoryState::None};
u16 ipc_lock_count{};
u16 device_use_count{};
KMemoryPermission perm{KMemoryPermission::None};
KMemoryPermission original_perm{KMemoryPermission::None};
KMemoryAttribute attribute{KMemoryAttribute::None};
u16 m_device_disable_merge_left_count;
u16 m_device_disable_merge_right_count;
VAddr m_address;
size_t m_num_pages;
KMemoryState m_memory_state;
u16 m_ipc_lock_count;
u16 m_device_use_count;
u16 m_ipc_disable_merge_count;
KMemoryPermission m_permission;
KMemoryPermission m_original_permission;
KMemoryAttribute m_attribute;
KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
public:
static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) {
@@ -261,113 +297,349 @@ public:
}
public:
constexpr KMemoryBlock() = default;
constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_,
KMemoryPermission perm_, KMemoryAttribute attribute_)
: addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}
constexpr VAddr GetAddress() const {
return addr;
return m_address;
}
constexpr std::size_t GetNumPages() const {
return num_pages;
constexpr size_t GetNumPages() const {
return m_num_pages;
}
constexpr std::size_t GetSize() const {
return GetNumPages() * PageSize;
constexpr size_t GetSize() const {
return this->GetNumPages() * PageSize;
}
constexpr VAddr GetEndAddress() const {
return GetAddress() + GetSize();
return this->GetAddress() + this->GetSize();
}
constexpr VAddr GetLastAddress() const {
return GetEndAddress() - 1;
return this->GetEndAddress() - 1;
}
constexpr u16 GetIpcLockCount() const {
return m_ipc_lock_count;
}
constexpr u16 GetIpcDisableMergeCount() const {
return m_ipc_disable_merge_count;
}
constexpr KMemoryPermission GetPermission() const {
return m_permission;
}
constexpr KMemoryPermission GetOriginalPermission() const {
return m_original_permission;
}
constexpr KMemoryAttribute GetAttribute() const {
return m_attribute;
}
constexpr KMemoryInfo GetMemoryInfo() const {
return {
GetAddress(), GetSize(), state, perm,
attribute, original_perm, ipc_lock_count, device_use_count,
.m_address = this->GetAddress(),
.m_size = this->GetSize(),
.m_state = m_memory_state,
.m_device_disable_merge_left_count = m_device_disable_merge_left_count,
.m_device_disable_merge_right_count = m_device_disable_merge_right_count,
.m_ipc_lock_count = m_ipc_lock_count,
.m_device_use_count = m_device_use_count,
.m_ipc_disable_merge_count = m_ipc_disable_merge_count,
.m_permission = m_permission,
.m_attribute = m_attribute,
.m_original_permission = m_original_permission,
.m_disable_merge_attribute = m_disable_merge_attribute,
};
}
void ShareToDevice(KMemoryPermission /*new_perm*/) {
ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
device_use_count == 0);
attribute |= KMemoryAttribute::DeviceShared;
const u16 new_use_count{++device_use_count};
ASSERT(new_use_count > 0);
public:
explicit KMemoryBlock() = default;
constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
KMemoryAttribute attr)
: Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(),
m_device_disable_merge_left_count(), m_device_disable_merge_right_count(),
m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p),
m_original_permission(KMemoryPermission::None), m_attribute(attr),
m_disable_merge_attribute() {}
constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
KMemoryAttribute attr) {
m_device_disable_merge_left_count = 0;
m_device_disable_merge_right_count = 0;
m_address = addr;
m_num_pages = np;
m_memory_state = ms;
m_ipc_lock_count = 0;
m_device_use_count = 0;
m_permission = p;
m_original_permission = KMemoryPermission::None;
m_attribute = attr;
m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None;
}
void UnshareToDevice(KMemoryPermission /*new_perm*/) {
ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
const u16 prev_use_count{device_use_count--};
ASSERT(prev_use_count > 0);
if (prev_use_count == 1) {
attribute &= ~KMemoryAttribute::DeviceShared;
}
}
private:
constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask |
KMemoryAttribute::IpcLocked |
KMemoryAttribute::DeviceShared};
return state == s && perm == p &&
(attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
constexpr auto AttributeIgnoreMask =
KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
return m_memory_state == s && m_permission == p &&
(m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
}
constexpr bool HasSameProperties(const KMemoryBlock& rhs) const {
return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
device_use_count == rhs.device_use_count;
return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission &&
m_original_permission == rhs.m_original_permission &&
m_attribute == rhs.m_attribute && m_ipc_lock_count == rhs.m_ipc_lock_count &&
m_device_use_count == rhs.m_device_use_count;
}
constexpr bool Contains(VAddr start) const {
return GetAddress() <= start && start <= GetEndAddress();
constexpr bool CanMergeWith(const KMemoryBlock& rhs) const {
return this->HasSameProperties(rhs) &&
(m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) ==
KMemoryBlockDisableMergeAttribute::None &&
(rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) ==
KMemoryBlockDisableMergeAttribute::None;
}
constexpr void Add(std::size_t count) {
ASSERT(count > 0);
ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1);
num_pages += count;
constexpr bool Contains(VAddr addr) const {
return this->GetAddress() <= addr && addr <= this->GetEndAddress();
}
constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm,
KMemoryAttribute new_attribute) {
ASSERT(original_perm == KMemoryPermission::None);
ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
constexpr void Add(const KMemoryBlock& added_block) {
ASSERT(added_block.GetNumPages() > 0);
ASSERT(this->GetAddress() + added_block.GetSize() - 1 <
this->GetEndAddress() + added_block.GetSize() - 1);
state = new_state;
perm = new_perm;
attribute = static_cast<KMemoryAttribute>(
new_attribute |
(attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
m_num_pages += added_block.GetNumPages();
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | added_block.m_disable_merge_attribute);
m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count;
}
constexpr KMemoryBlock Split(VAddr split_addr) {
ASSERT(GetAddress() < split_addr);
ASSERT(Contains(split_addr));
ASSERT(Common::IsAligned(split_addr, PageSize));
constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a,
bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) {
ASSERT(m_original_permission == KMemoryPermission::None);
ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
KMemoryBlock block;
block.addr = addr;
block.num_pages = (split_addr - GetAddress()) / PageSize;
block.state = state;
block.ipc_lock_count = ipc_lock_count;
block.device_use_count = device_use_count;
block.perm = perm;
block.original_perm = original_perm;
block.attribute = attribute;
m_memory_state = s;
m_permission = p;
m_attribute = static_cast<KMemoryAttribute>(
a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
addr = split_addr;
num_pages -= block.num_pages;
if (set_disable_merge_attr && set_mask != 0) {
m_disable_merge_attribute = m_disable_merge_attribute |
static_cast<KMemoryBlockDisableMergeAttribute>(set_mask);
}
if (clear_mask != 0) {
m_disable_merge_attribute = m_disable_merge_attribute &
static_cast<KMemoryBlockDisableMergeAttribute>(~clear_mask);
}
}
return block;
constexpr void Split(KMemoryBlock* block, VAddr addr) {
ASSERT(this->GetAddress() < addr);
ASSERT(this->Contains(addr));
ASSERT(Common::IsAligned(addr, PageSize));
block->m_address = m_address;
block->m_num_pages = (addr - this->GetAddress()) / PageSize;
block->m_memory_state = m_memory_state;
block->m_ipc_lock_count = m_ipc_lock_count;
block->m_device_use_count = m_device_use_count;
block->m_permission = m_permission;
block->m_original_permission = m_original_permission;
block->m_attribute = m_attribute;
block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft);
block->m_ipc_disable_merge_count = m_ipc_disable_merge_count;
block->m_device_disable_merge_left_count = m_device_disable_merge_left_count;
block->m_device_disable_merge_right_count = 0;
m_address = addr;
m_num_pages -= block->m_num_pages;
m_ipc_disable_merge_count = 0;
m_device_disable_merge_left_count = 0;
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
}
constexpr void UpdateDeviceDisableMergeStateForShareLeft(
[[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
if (left) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft);
const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count;
ASSERT(new_device_disable_merge_left_count > 0);
}
}
constexpr void UpdateDeviceDisableMergeStateForShareRight(
[[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
if (right) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight);
const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count;
ASSERT(new_device_disable_merge_right_count > 0);
}
}
constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left,
bool right) {
this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right);
this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
}
constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
// We must either be shared or have a zero lock count.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
m_device_use_count == 0);
// Share.
const u16 new_count = ++m_device_use_count;
ASSERT(new_count > 0);
m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::DeviceShared);
this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
}
constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
[[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
if (left) {
if (!m_device_disable_merge_left_count) {
return;
}
--m_device_disable_merge_left_count;
}
m_device_disable_merge_left_count =
std::min(m_device_disable_merge_left_count, m_device_use_count);
if (m_device_disable_merge_left_count == 0) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft);
}
}
constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
[[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
if (right) {
const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
ASSERT(old_device_disable_merge_right_count > 0);
if (old_device_disable_merge_right_count == 1) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight);
}
}
}
constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left,
bool right) {
this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right);
this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
}
constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
// We must be shared.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
// Unshare.
const u16 old_count = m_device_use_count--;
ASSERT(old_count > 0);
if (old_count == 1) {
m_attribute =
static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
}
this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
}
constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
// We must be shared.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
// Unshare.
const u16 old_count = m_device_use_count--;
ASSERT(old_count > 0);
if (old_count == 1) {
m_attribute =
static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
}
this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
}
constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
// We must either be locked or have a zero lock count.
ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
m_ipc_lock_count == 0);
// Lock.
const u16 new_lock_count = ++m_ipc_lock_count;
ASSERT(new_lock_count > 0);
// If this is our first lock, update our permissions.
if (new_lock_count == 1) {
ASSERT(m_original_permission == KMemoryPermission::None);
ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) ==
(m_permission | KMemoryPermission::NotMapped));
ASSERT((m_permission & KMemoryPermission::UserExecute) !=
KMemoryPermission::UserExecute ||
(new_perm == KMemoryPermission::UserRead));
m_original_permission = m_permission;
m_permission = static_cast<KMemoryPermission>(
(new_perm & KMemoryPermission::IpcLockChangeMask) |
(m_original_permission & ~KMemoryPermission::IpcLockChangeMask));
}
m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::IpcLocked);
if (left) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft);
const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count;
ASSERT(new_ipc_disable_merge_count > 0);
}
}
constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
[[maybe_unused]] bool right) {
// We must be locked.
ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked);
// Unlock.
const u16 old_lock_count = m_ipc_lock_count--;
ASSERT(old_lock_count > 0);
// If this is our last unlock, update our permissions.
if (old_lock_count == 1) {
ASSERT(m_original_permission != KMemoryPermission::None);
m_permission = m_original_permission;
m_original_permission = KMemoryPermission::None;
m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::IpcLocked);
}
if (left) {
const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--;
ASSERT(old_ipc_disable_merge_count > 0);
if (old_ipc_disable_merge_count == 1) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft);
}
}
}
constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
return m_disable_merge_attribute;
}
};
static_assert(std::is_trivially_destructible<KMemoryBlock>::value);
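A hedged sketch of the split/merge round trip these members implement; the addresses are invented, and in practice only KMemoryBlockManager drives these calls:

// A 16-page free block covering [0x10000000, 0x10010000).
Kernel::KMemoryBlock base(0x10000000, 16, Kernel::KMemoryState::Free,
                          Kernel::KMemoryPermission::None, Kernel::KMemoryAttribute::None);
Kernel::KMemoryBlock low;
base.Split(std::addressof(low), 0x10008000);
// 'low' now holds [0x10000000, 0x10008000); 'base' keeps [0x10008000, 0x10010000).
if (low.CanMergeWith(base)) {
    low.Add(base);  // coalesce back into a single 16-page block
}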

View File

@@ -2,221 +2,336 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/memory_types.h"
namespace Kernel {
KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_)
: start_addr{start_addr_}, end_addr{end_addr_} {
const u64 num_pages{(end_addr - start_addr) / PageSize};
memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free,
KMemoryPermission::None, KMemoryAttribute::None);
KMemoryBlockManager::KMemoryBlockManager() = default;
Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
// Allocate a block to encapsulate the address space, insert it into the tree.
KMemoryBlock* start_block = slab_manager->Allocate();
R_UNLESS(start_block != nullptr, ResultOutOfResource);
// Set our start and end.
m_start_address = st;
m_end_address = nd;
ASSERT(Common::IsAligned(m_start_address, PageSize));
ASSERT(Common::IsAligned(m_end_address, PageSize));
// Initialize and insert the block.
start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None);
m_memory_block_tree.insert(*start_block);
R_SUCCEED();
}
KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) {
auto node{memory_block_tree.begin()};
while (node != end()) {
const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) {
return node;
}
node = std::next(node);
void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
HostUnmapCallback&& host_unmap_callback) {
// Erase every block until we have none left.
auto it = m_memory_block_tree.begin();
while (it != m_memory_block_tree.end()) {
KMemoryBlock* block = std::addressof(*it);
it = m_memory_block_tree.erase(it);
host_unmap_callback(block->GetAddress(), block->GetSize());
slab_manager->Free(block);
}
return end();
ASSERT(m_memory_block_tree.empty());
}
VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
std::size_t num_pages, std::size_t align,
std::size_t offset, std::size_t guard_pages) {
if (num_pages == 0) {
return {};
}
VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
size_t num_pages, size_t alignment, size_t offset,
size_t guard_pages) const {
if (num_pages > 0) {
const VAddr region_end = region_start + region_num_pages * PageSize;
const VAddr region_last = region_end - 1;
for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
it++) {
const KMemoryInfo info = it->GetMemoryInfo();
if (region_last < info.GetAddress()) {
break;
}
if (info.m_state != KMemoryState::Free) {
continue;
}
const VAddr region_end{region_start + region_num_pages * PageSize};
const VAddr region_last{region_end - 1};
for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
const auto info{it->GetMemoryInfo()};
if (region_last < info.GetAddress()) {
break;
}
VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
area += guard_pages * PageSize;
if (info.state != KMemoryState::Free) {
continue;
}
const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
area = (area <= offset_area) ? offset_area : offset_area + alignment;
VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
area += guard_pages * PageSize;
const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
const VAddr area_last = area_end - 1;
const VAddr offset_area{Common::AlignDown(area, align) + offset};
area = (area <= offset_area) ? offset_area : offset_area + align;
const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
const VAddr area_last{area_end - 1};
if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
area_last <= info.GetLastAddress()) {
return area;
if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
area_last <= info.GetLastAddress()) {
return area;
}
}
}
return {};
}
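A worked example of the parameters (values illustrative): find 4 pages inside a 256-page region, 64 KiB-aligned, with one guard page required on each side of the candidate:

const VAddr area = block_manager.FindFreeArea(region_start, /*region_num_pages=*/256,
                                              /*num_pages=*/4, /*alignment=*/0x10000,
                                              /*offset=*/0, /*guard_pages=*/1);
// Returns 0 when no single free block can hold guard + pages + guard at that alignment.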
void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
KMemoryPermission prev_perm, KMemoryAttribute prev_attribute,
KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attribute) {
const VAddr update_end_addr{addr + num_pages * PageSize};
iterator node{memory_block_tree.begin()};
void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
VAddr address, size_t num_pages) {
// Find the iterator now that we've updated.
iterator it = this->FindIterator(address);
if (address != m_start_address) {
it--;
}
prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped;
while (node != memory_block_tree.end()) {
KMemoryBlock* block{&(*node)};
iterator next_node{std::next(node)};
const VAddr cur_addr{block->GetAddress()};
const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
if (addr < cur_end_addr && cur_addr < update_end_addr) {
if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
node = next_node;
continue;
}
iterator new_node{node};
if (addr > cur_addr) {
memory_block_tree.insert(node, block->Split(addr));
}
if (update_end_addr < cur_end_addr) {
new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
}
new_node->Update(state, perm, attribute);
MergeAdjacent(new_node, next_node);
}
if (cur_end_addr - 1 >= update_end_addr - 1) {
// Coalesce blocks that we can.
while (true) {
iterator prev = it++;
if (it == m_memory_block_tree.end()) {
break;
}
node = next_node;
}
}
void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state,
KMemoryPermission perm, KMemoryAttribute attribute) {
const VAddr update_end_addr{addr + num_pages * PageSize};
iterator node{memory_block_tree.begin()};
while (node != memory_block_tree.end()) {
KMemoryBlock* block{&(*node)};
iterator next_node{std::next(node)};
const VAddr cur_addr{block->GetAddress()};
const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
if (addr < cur_end_addr && cur_addr < update_end_addr) {
iterator new_node{node};
if (addr > cur_addr) {
memory_block_tree.insert(node, block->Split(addr));
}
if (update_end_addr < cur_end_addr) {
new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
}
new_node->Update(state, perm, attribute);
MergeAdjacent(new_node, next_node);
if (prev->CanMergeWith(*it)) {
KMemoryBlock* block = std::addressof(*it);
m_memory_block_tree.erase(it);
prev->Add(*block);
allocator->Free(block);
it = prev;
}
if (cur_end_addr - 1 >= update_end_addr - 1) {
if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
break;
}
node = next_node;
}
}
void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
size_t num_pages, KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attr,
KMemoryBlockDisableMergeAttribute set_disable_attr,
KMemoryBlockDisableMergeAttribute clear_disable_attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(address, PageSize));
ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
KMemoryAttribute::None);
VAddr cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if (it->HasProperties(state, perm, attr)) {
// If we already have the right properties, just advance.
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages =
(cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
}
} else {
// If we need to, create a new block before and insert it.
if (cur_info.GetAddress() != cur_address) {
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
}
// If we need to, create a new block after and insert it.
if (cur_info.GetSize() > remaining_size) {
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
}
// Update block state.
it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
static_cast<u8>(clear_disable_attr));
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
}
it++;
}
this->CoalesceForUpdate(allocator, address, num_pages);
}
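A hedged example of a typical call, mirroring how a page table might transition a freshly mapped range ('allocator' is a KMemoryBlockManagerUpdateAllocator, declared in the header further below):

block_manager.Update(std::addressof(allocator), map_address, map_num_pages,
                     Kernel::KMemoryState::Normal, Kernel::KMemoryPermission::UserReadWrite,
                     Kernel::KMemoryAttribute::None,
                     Kernel::KMemoryBlockDisableMergeAttribute::Normal,
                     Kernel::KMemoryBlockDisableMergeAttribute::None);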
void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
VAddr address, size_t num_pages, KMemoryState test_state,
KMemoryPermission test_perm, KMemoryAttribute test_attr,
KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(address, PageSize));
ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
KMemoryAttribute::None);
VAddr cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if (it->HasProperties(test_state, test_perm, test_attr) &&
!it->HasProperties(state, perm, attr)) {
// If we need to, create a new block before and insert it.
if (cur_info.GetAddress() != cur_address) {
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
}
// If we need to, create a new block after and insert it.
if (cur_info.GetSize() > remaining_size) {
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
}
// Update block state.
it->Update(state, perm, attr, false, 0, 0);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
} else {
// If we already have the right properties, just advance.
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages =
(cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
}
}
it++;
}
this->CoalesceForUpdate(allocator, address, num_pages);
}
void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
size_t num_pages, MemoryBlockLockFunction lock_func,
KMemoryPermission perm) {
const VAddr update_end_addr{addr + num_pages * PageSize};
iterator node{memory_block_tree.begin()};
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(address, PageSize));
while (node != memory_block_tree.end()) {
KMemoryBlock* block{&(*node)};
iterator next_node{std::next(node)};
const VAddr cur_addr{block->GetAddress()};
const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
VAddr cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
if (addr < cur_end_addr && cur_addr < update_end_addr) {
iterator new_node{node};
const VAddr end_address = address + (num_pages * PageSize);
if (addr > cur_addr) {
memory_block_tree.insert(node, block->Split(addr));
}
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if (update_end_addr < cur_end_addr) {
new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
}
// If we need to, create a new block before and insert it.
if (cur_info.m_address != cur_address) {
KMemoryBlock* new_block = allocator->Allocate();
lock_func(new_node, perm);
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
MergeAdjacent(new_node, next_node);
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
}
if (cur_end_addr - 1 >= update_end_addr - 1) {
break;
if (cur_info.GetSize() > remaining_size) {
// If we need to, create a new block after and insert it.
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
}
node = next_node;
// Call the locked update function.
(std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address,
cur_info.GetEndAddress() == end_address);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
it++;
}
this->CoalesceForUpdate(allocator, address, num_pages);
}
void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
const_iterator it{FindIterator(start)};
KMemoryInfo info{};
do {
info = it->GetMemoryInfo();
func(info);
it = std::next(it);
} while (info.addr + info.size - 1 < end - 1 && it != cend());
}
// Debug.
bool KMemoryBlockManager::CheckState() const {
// Loop over every block, ensuring that we are sorted and coalesced.
auto it = m_memory_block_tree.cbegin();
auto prev = it++;
while (it != m_memory_block_tree.cend()) {
const KMemoryInfo prev_info = prev->GetMemoryInfo();
const KMemoryInfo cur_info = it->GetMemoryInfo();
void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
KMemoryBlock* block{&(*it)};
auto EraseIt = [&](const iterator it_to_erase) {
if (next_it == it_to_erase) {
next_it = std::next(next_it);
// Sequential blocks which can be merged should be merged.
if (prev->CanMergeWith(*it)) {
return false;
}
memory_block_tree.erase(it_to_erase);
};
if (it != memory_block_tree.begin()) {
KMemoryBlock* prev{&(*std::prev(it))};
// Sequential blocks should be sequential.
if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
return false;
}
if (block->HasSameProperties(*prev)) {
const iterator prev_it{std::prev(it)};
// If the block is ipc locked, it must have a count.
if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
cur_info.m_ipc_lock_count == 0) {
return false;
}
prev->Add(block->GetNumPages());
EraseIt(it);
// If the block is device shared, it must have a count.
if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
cur_info.m_device_use_count == 0) {
return false;
}
it = prev_it;
block = prev;
// Advance the iterator.
prev = it++;
}
// Our loop will miss checking the last block, potentially, so check it.
if (prev != m_memory_block_tree.cend()) {
const KMemoryInfo prev_info = prev->GetMemoryInfo();
// If the block is ipc locked, it must have a count.
if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
prev_info.m_ipc_lock_count == 0) {
return false;
}
// If the block is device shared, it must have a count.
if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
prev_info.m_device_use_count == 0) {
return false;
}
}
if (it != cend()) {
const KMemoryBlock* const next{&(*std::next(it))};
if (block->HasSameProperties(*next)) {
block->Add(next->GetNumPages());
EraseIt(std::next(it));
}
}
return true;
}
} // namespace Kernel

View File

@@ -4,63 +4,154 @@
#pragma once
#include <functional>
#include <list>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_memory_block.h"
namespace Kernel {
class KMemoryBlockManagerUpdateAllocator {
public:
static constexpr size_t MaxBlocks = 2;
private:
KMemoryBlock* m_blocks[MaxBlocks];
size_t m_index;
KMemoryBlockSlabManager* m_slab_manager;
private:
Result Initialize(size_t num_blocks) {
// Check num blocks.
ASSERT(num_blocks <= MaxBlocks);
// Set index.
m_index = MaxBlocks - num_blocks;
// Allocate the blocks.
for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) {
m_blocks[m_index + i] = m_slab_manager->Allocate();
R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource);
}
R_SUCCEED();
}
public:
KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm,
size_t num_blocks = MaxBlocks)
: m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) {
*out_result = this->Initialize(num_blocks);
}
~KMemoryBlockManagerUpdateAllocator() {
for (const auto& block : m_blocks) {
if (block != nullptr) {
m_slab_manager->Free(block);
}
}
}
KMemoryBlock* Allocate() {
ASSERT(m_index < MaxBlocks);
ASSERT(m_blocks[m_index] != nullptr);
KMemoryBlock* block = nullptr;
std::swap(block, m_blocks[m_index++]);
return block;
}
void Free(KMemoryBlock* block) {
ASSERT(m_index <= MaxBlocks);
ASSERT(block != nullptr);
if (m_index == 0) {
m_slab_manager->Free(block);
} else {
m_blocks[--m_index] = block;
}
}
};
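
The class above front-loads all slab allocations so the block-tree update itself can never fail midway: blocks are reserved before any state is touched, handed out infallibly during the update, and leftovers are returned in the destructor. A minimal standalone sketch of that pattern, with trivial stand-in Slab/Block types (the real allocator reports failure via Result rather than throwing):

#include <array>
#include <cassert>
#include <cstddef>
#include <utility>

struct Block {};  // stand-in for KMemoryBlock

struct Slab {  // stand-in for KMemoryBlockSlabManager
    Block* Allocate() { return new Block{}; }
    void Free(Block* b) { delete b; }
};

class UpdateAllocator {
public:
    static constexpr std::size_t MaxBlocks = 2;

    explicit UpdateAllocator(Slab* slab) : m_slab{slab} {
        // Reserve everything up front so the update itself cannot fail.
        for (auto& block : m_blocks) {
            block = m_slab->Allocate();
        }
    }

    ~UpdateAllocator() {
        // Return whatever the update did not consume.
        for (auto* block : m_blocks) {
            if (block != nullptr) {
                m_slab->Free(block);
            }
        }
    }

    Block* Take() {
        // Infallible by construction: the blocks already exist.
        assert(m_index < MaxBlocks && m_blocks[m_index] != nullptr);
        Block* block = nullptr;
        std::swap(block, m_blocks[m_index++]);
        return block;
    }

private:
    std::array<Block*, MaxBlocks> m_blocks{};
    std::size_t m_index{0};
    Slab* m_slab;
};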
class KMemoryBlockManager final {
public:
using MemoryBlockTree = std::list<KMemoryBlock>;
using MemoryBlockTree =
Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left,
bool right);
using iterator = MemoryBlockTree::iterator;
using const_iterator = MemoryBlockTree::const_iterator;
public:
KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_);
KMemoryBlockManager();
using HostUnmapCallback = std::function<void(VAddr, u64)>;
Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);
iterator end() {
return memory_block_tree.end();
return m_memory_block_tree.end();
}
const_iterator end() const {
return memory_block_tree.end();
return m_memory_block_tree.end();
}
const_iterator cend() const {
return memory_block_tree.cend();
return m_memory_block_tree.cend();
}
iterator FindIterator(VAddr addr);
VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
size_t alignment, size_t offset, size_t guard_pages) const;
VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
std::size_t align, std::size_t offset, std::size_t guard_pages);
void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
KMemoryBlockDisableMergeAttribute set_disable_attr,
KMemoryBlockDisableMergeAttribute clear_disable_attr);
void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
MemoryBlockLockFunction lock_func, KMemoryPermission perm);
void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state,
KMemoryPermission perm, KMemoryAttribute attribute);
void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attr);
void Update(VAddr addr, std::size_t num_pages, KMemoryState state,
KMemoryPermission perm = KMemoryPermission::None,
KMemoryAttribute attribute = KMemoryAttribute::None);
using LockFunc = std::function<void(iterator, KMemoryPermission)>;
void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
KMemoryPermission perm);
using IterateFunc = std::function<void(const KMemoryInfo&)>;
void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);
KMemoryBlock& FindBlock(VAddr addr) {
    return *FindIterator(addr);
}

iterator FindIterator(VAddr address) const {
    return m_memory_block_tree.find(KMemoryBlock(
        address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
}
const KMemoryBlock* FindBlock(VAddr address) const {
if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
return std::addressof(*it);
}
return nullptr;
}
// Debug.
bool CheckState() const;
private:
void MergeAdjacent(iterator it, iterator& next_it);
void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
size_t num_pages);
[[maybe_unused]] const VAddr start_addr;
[[maybe_unused]] const VAddr end_addr;
MemoryBlockTree memory_block_tree;

MemoryBlockTree m_memory_block_tree;
VAddr m_start_address{};
VAddr m_end_address{};
};
class KScopedMemoryBlockManagerAuditor {
private:
KMemoryBlockManager* m_manager;
public:
explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {
ASSERT(m_manager->CheckState());
}
explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m)
: KScopedMemoryBlockManagerAuditor(std::addressof(m)) {}
~KScopedMemoryBlockManagerAuditor() {
ASSERT(m_manager->CheckState());
}
};
} // namespace Kernel
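
KScopedMemoryBlockManagerAuditor above is plain RAII auditing: CheckState() must hold both when a mutating scope is entered and again when it unwinds, on every exit path. A standalone sketch of the idiom, with a hypothetical Manager type standing in:

#include <cassert>

struct Manager {  // hypothetical stand-in; CheckState is the invariant probe
    bool CheckState() const { return true; }
};

class ScopedAuditor {
public:
    explicit ScopedAuditor(Manager& m) : m_manager{&m} {
        assert(m_manager->CheckState());  // invariants hold on entry
    }
    ~ScopedAuditor() {
        assert(m_manager->CheckState());  // ...and again on every exit path
    }

private:
    Manager* m_manager;
};

void MutateSomething(Manager& m) {
    ScopedAuditor audit{m};
    // ... perform tree updates on m; early returns are still audited ...
}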

View File

@ -331,7 +331,7 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag
// Set all the allocated memory.
for (const auto& block : out->Nodes()) {
std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern,
std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
block.GetSize());
}

View File

@ -12,7 +12,7 @@ namespace Kernel {
KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
ASSERT(Common::IsAligned(phys_addr, PageSize));
return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr));
return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
}
} // namespace Kernel

File diff suppressed because it is too large

View File

@ -9,8 +9,10 @@
#include "common/common_types.h"
#include "common/page_table.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/result.h"
@ -34,58 +36,66 @@ public:
~KPageTable();
Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool);
Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
VAddr code_addr, size_t code_size,
KMemoryBlockSlabManager* mem_block_slab_manager,
KMemoryManager::Pool pool);
void Finalize();
Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
KMemoryPermission perm);
Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
ICacheInvalidationStrategy icache_invalidation_strategy);
Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
VAddr src_addr);
Result MapPhysicalMemory(VAddr addr, std::size_t size);
Result UnmapPhysicalMemory(VAddr addr, std::size_t size);
Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
Result MapPhysicalMemory(VAddr addr, size_t size);
Result UnmapPhysicalMemory(VAddr addr, size_t size);
Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
KMemoryPermission perm);
Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
KMemoryState state, KMemoryPermission perm) {
return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
state, perm);
R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
this->GetRegionAddress(state),
this->GetRegionSize(state) / PageSize, state, perm));
}
Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm);
Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
KMemoryInfo QueryInfo(VAddr addr);
Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
Result ResetTransferMemory(VAddr addr, std::size_t size);
Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
Result SetMaxHeapSize(std::size_t size);
Result SetHeapSize(VAddr* out, std::size_t size);
ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
bool is_map_only, VAddr region_start,
std::size_t region_num_pages, KMemoryState state,
KMemoryPermission perm, PAddr map_addr = 0);
Result LockForDeviceAddressSpace(VAddr addr, std::size_t size);
Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size);
Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg);
Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
Result SetMaxHeapSize(size_t size);
Result SetHeapSize(VAddr* out, size_t size);
ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
VAddr region_start, size_t region_num_pages,
KMemoryState state, KMemoryPermission perm,
PAddr map_addr = 0);
Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
bool is_aligned);
Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size);
Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr);
Common::PageTable& PageTableImpl() {
return page_table_impl;
return *m_page_table_impl;
}
const Common::PageTable& PageTableImpl() const {
return page_table_impl;
return *m_page_table_impl;
}
bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
private:
enum class OperationType : u32 {
@ -96,67 +106,65 @@ private:
ChangePermissionsAndRefresh,
};
static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask |
KMemoryAttribute::IpcLocked |
KMemoryAttribute::DeviceShared;
static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
Result InitializeMemoryLayout(VAddr start, VAddr end);
Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
bool is_pa_valid, VAddr region_start, std::size_t region_num_pages,
Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
bool is_pa_valid, VAddr region_start, size_t region_num_pages,
KMemoryState state, KMemoryPermission perm);
Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
bool IsRegionMapped(VAddr address, u64 size);
bool IsRegionContiguous(VAddr addr, u64 size) const;
void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list);
void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
KMemoryInfo QueryInfoImpl(VAddr addr);
VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
std::size_t align);
Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
size_t align);
Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
OperationType operation);
Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
OperationType operation, PAddr map_addr = 0);
Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
PAddr map_addr = 0);
VAddr GetRegionAddress(KMemoryState state) const;
std::size_t GetRegionSize(KMemoryState state) const;
size_t GetRegionSize(KMemoryState state) const;
VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
std::size_t alignment, std::size_t offset, std::size_t guard_pages);
VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
size_t alignment, size_t offset, size_t guard_pages);
Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
perm, attr_mask, attr);
R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
perm, attr_mask, attr));
}
Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr,
std::size_t size, KMemoryState state_mask, KMemoryState state,
KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr,
size_t size, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
state_mask, state, perm_mask, perm, attr_mask, attr,
ignore_attr));
}
Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
attr_mask, attr, ignore_attr);
R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
attr_mask, attr, ignore_attr));
}
Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
@ -174,13 +182,13 @@ private:
bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
bool IsLockedByCurrentThread() const {
return general_lock.IsLockedByCurrentThread();
return m_general_lock.IsLockedByCurrentThread();
}
bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
ASSERT(this->IsLockedByCurrentThread());
return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
}
bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
@ -191,95 +199,93 @@ private:
return *out != 0;
}
mutable KLightLock general_lock;
mutable KLightLock map_physical_memory_lock;
std::unique_ptr<KMemoryBlockManager> block_manager;
mutable KLightLock m_general_lock;
mutable KLightLock m_map_physical_memory_lock;
public:
constexpr VAddr GetAddressSpaceStart() const {
return address_space_start;
return m_address_space_start;
}
constexpr VAddr GetAddressSpaceEnd() const {
return address_space_end;
return m_address_space_end;
}
constexpr std::size_t GetAddressSpaceSize() const {
return address_space_end - address_space_start;
constexpr size_t GetAddressSpaceSize() const {
return m_address_space_end - m_address_space_start;
}
constexpr VAddr GetHeapRegionStart() const {
return heap_region_start;
return m_heap_region_start;
}
constexpr VAddr GetHeapRegionEnd() const {
return heap_region_end;
return m_heap_region_end;
}
constexpr std::size_t GetHeapRegionSize() const {
return heap_region_end - heap_region_start;
constexpr size_t GetHeapRegionSize() const {
return m_heap_region_end - m_heap_region_start;
}
constexpr VAddr GetAliasRegionStart() const {
return alias_region_start;
return m_alias_region_start;
}
constexpr VAddr GetAliasRegionEnd() const {
return alias_region_end;
return m_alias_region_end;
}
constexpr std::size_t GetAliasRegionSize() const {
return alias_region_end - alias_region_start;
constexpr size_t GetAliasRegionSize() const {
return m_alias_region_end - m_alias_region_start;
}
constexpr VAddr GetStackRegionStart() const {
return stack_region_start;
return m_stack_region_start;
}
constexpr VAddr GetStackRegionEnd() const {
return stack_region_end;
return m_stack_region_end;
}
constexpr std::size_t GetStackRegionSize() const {
return stack_region_end - stack_region_start;
constexpr size_t GetStackRegionSize() const {
return m_stack_region_end - m_stack_region_start;
}
constexpr VAddr GetKernelMapRegionStart() const {
return kernel_map_region_start;
return m_kernel_map_region_start;
}
constexpr VAddr GetKernelMapRegionEnd() const {
return kernel_map_region_end;
return m_kernel_map_region_end;
}
constexpr VAddr GetCodeRegionStart() const {
return code_region_start;
return m_code_region_start;
}
constexpr VAddr GetCodeRegionEnd() const {
return code_region_end;
return m_code_region_end;
}
constexpr VAddr GetAliasCodeRegionStart() const {
return alias_code_region_start;
return m_alias_code_region_start;
}
constexpr VAddr GetAliasCodeRegionSize() const {
return alias_code_region_end - alias_code_region_start;
return m_alias_code_region_end - m_alias_code_region_start;
}
std::size_t GetNormalMemorySize() {
KScopedLightLock lk(general_lock);
return GetHeapSize() + mapped_physical_memory_size;
size_t GetNormalMemorySize() {
KScopedLightLock lk(m_general_lock);
return GetHeapSize() + m_mapped_physical_memory_size;
}
constexpr std::size_t GetAddressSpaceWidth() const {
return address_space_width;
constexpr size_t GetAddressSpaceWidth() const {
return m_address_space_width;
}
constexpr std::size_t GetHeapSize() const {
return current_heap_end - heap_region_start;
constexpr size_t GetHeapSize() const {
return m_current_heap_end - m_heap_region_start;
}
constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
return address_space_start <= address && address + size - 1 <= address_space_end - 1;
constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const {
return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1;
}
constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const {
return alias_region_start > address || address + size - 1 > alias_region_end - 1;
constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const {
return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1;
}
constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const {
return stack_region_start > address || address + size - 1 > stack_region_end - 1;
constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const {
return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1;
}
constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const {
constexpr bool IsInvalidRegion(VAddr address, size_t size) const {
return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
}
constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const {
return address + size > heap_region_start && heap_region_end > address;
constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const {
return address + size > m_heap_region_start && m_heap_region_end > address;
}
constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const {
return address + size > alias_region_start && alias_region_end > address;
constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const {
return address + size > m_alias_region_start && m_alias_region_end > address;
}
constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const {
constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const {
if (IsInvalidRegion(address, size)) {
return true;
}
@ -291,73 +297,78 @@ public:
}
return {};
}
constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const {
return !IsOutsideASLRRegion(address, size);
}
constexpr std::size_t GetNumGuardPages() const {
constexpr size_t GetNumGuardPages() const {
return IsKernel() ? 1 : 4;
}
PAddr GetPhysicalAddr(VAddr addr) const {
const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
ASSERT(backing_addr);
return backing_addr + addr;
}
constexpr bool Contains(VAddr addr) const {
return address_space_start <= addr && addr <= address_space_end - 1;
return m_address_space_start <= addr && addr <= m_address_space_end - 1;
}
constexpr bool Contains(VAddr addr, std::size_t size) const {
return address_space_start <= addr && addr < addr + size &&
addr + size - 1 <= address_space_end - 1;
constexpr bool Contains(VAddr addr, size_t size) const {
return m_address_space_start <= addr && addr < addr + size &&
addr + size - 1 <= m_address_space_end - 1;
}
private:
constexpr bool IsKernel() const {
return is_kernel;
return m_is_kernel;
}
constexpr bool IsAslrEnabled() const {
return is_aslr_enabled;
return m_enable_aslr;
}
constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
return (address_space_start <= addr) &&
(num_pages <= (address_space_end - address_space_start) / PageSize) &&
(addr + num_pages * PageSize - 1 <= address_space_end - 1);
constexpr bool ContainsPages(VAddr addr, size_t num_pages) const {
return (m_address_space_start <= addr) &&
(num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
(addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
}
private:
VAddr address_space_start{};
VAddr address_space_end{};
VAddr heap_region_start{};
VAddr heap_region_end{};
VAddr current_heap_end{};
VAddr alias_region_start{};
VAddr alias_region_end{};
VAddr stack_region_start{};
VAddr stack_region_end{};
VAddr kernel_map_region_start{};
VAddr kernel_map_region_end{};
VAddr code_region_start{};
VAddr code_region_end{};
VAddr alias_code_region_start{};
VAddr alias_code_region_end{};
VAddr m_address_space_start{};
VAddr m_address_space_end{};
VAddr m_heap_region_start{};
VAddr m_heap_region_end{};
VAddr m_current_heap_end{};
VAddr m_alias_region_start{};
VAddr m_alias_region_end{};
VAddr m_stack_region_start{};
VAddr m_stack_region_end{};
VAddr m_kernel_map_region_start{};
VAddr m_kernel_map_region_end{};
VAddr m_code_region_start{};
VAddr m_code_region_end{};
VAddr m_alias_code_region_start{};
VAddr m_alias_code_region_end{};
std::size_t mapped_physical_memory_size{};
std::size_t max_heap_size{};
std::size_t max_physical_memory_size{};
std::size_t address_space_width{};
size_t m_mapped_physical_memory_size{};
size_t m_max_heap_size{};
size_t m_max_physical_memory_size{};
size_t m_address_space_width{};
bool is_kernel{};
bool is_aslr_enabled{};
u32 heap_fill_value{};
const KMemoryRegion* cached_physical_heap_region{};
KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};
Common::PageTable page_table_impl;
Core::System& system;

KMemoryBlockManager m_memory_block_manager;

bool m_is_kernel{};
bool m_enable_aslr{};
bool m_enable_device_address_space_merge{};

KMemoryBlockSlabManager* m_memory_block_slab_manager{};

u32 m_heap_fill_value{};
const KMemoryRegion* m_cached_physical_heap_region{};

KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};

std::unique_ptr<Common::PageTable> m_page_table_impl;

Core::System& m_system;
};
} // namespace Kernel
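
One detail worth noting in the header above: the containment checks consistently compare inclusive end addresses (`addr + size - 1 <= end - 1`) rather than exclusive ones (`addr + size <= end`). With unsigned arithmetic this stays correct even for a nonempty range ending exactly at the top of the address space, where `addr + size` wraps to zero. A small self-contained demonstration of the rationale as I read it (not something the diff itself states):

#include <cstdint>
#include <iostream>

// Inclusive-end containment check, mirroring the page-table helpers.
// Assumes size is nonzero.
bool Contains(std::uint64_t start, std::uint64_t last, std::uint64_t addr, std::uint64_t size) {
    return start <= addr && addr + size - 1 <= last;
}

int main() {
    const std::uint64_t addr = 0xFFFFFFFFFFFFF000ULL;
    const std::uint64_t size = 0x1000;                    // range ends exactly at 2^64
    std::cout << (addr + size) << '\n';                   // 0: the exclusive end wrapped
    std::cout << Contains(0, ~0ULL, addr, size) << '\n';  // 1: inclusive form still works
}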

View File

@ -72,7 +72,8 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
process->name = std::move(process_name);
process->resource_limit = res_limit;
process->status = ProcessStatus::Created;
process->system_resource_address = 0;
process->state = State::Created;
process->program_id = 0;
process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
: kernel.CreateNewUserProcessID();
@ -92,11 +93,12 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
process->exception_thread = nullptr;
process->is_suspended = false;
process->schedule_count = 0;
process->is_handle_table_initialized = false;
// Open a reference to the resource limit.
process->resource_limit->Open();
return ResultSuccess;
R_SUCCEED();
}
void KProcess::DoWorkerTaskImpl() {
@ -121,9 +123,9 @@ void KProcess::DecrementRunningThreadCount() {
}
}
u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
u64 KProcess::GetTotalPhysicalMemoryAvailable() {
const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size +
page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
main_thread_stack_size};
if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
capacity != pool_size) {
@ -135,16 +137,16 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
return memory_usage_capacity;
}
u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
}
u64 KProcess::GetTotalPhysicalMemoryUsed() const {
return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() +
u64 KProcess::GetTotalPhysicalMemoryUsed() {
return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() +
GetSystemResourceSize();
}
u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
}
@ -244,7 +246,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
shmem->Open();
shemen_info->Open();
return ResultSuccess;
R_SUCCEED();
}
void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
@ -289,12 +291,12 @@ Result KProcess::Reset() {
KScopedSchedulerLock sl{kernel};
// Validate that we're in a state that we can reset.
R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
R_UNLESS(state != State::Terminated, ResultInvalidState);
R_UNLESS(is_signaled, ResultInvalidState);
// Clear signaled.
is_signaled = false;
return ResultSuccess;
R_SUCCEED();
}
Result KProcess::SetActivity(ProcessActivity activity) {
@ -304,15 +306,13 @@ Result KProcess::SetActivity(ProcessActivity activity) {
KScopedSchedulerLock sl{kernel};
// Validate our state.
R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState);
R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
R_UNLESS(state != State::Terminating, ResultInvalidState);
R_UNLESS(state != State::Terminated, ResultInvalidState);
// Either pause or resume.
if (activity == ProcessActivity::Paused) {
// Verify that we're not suspended.
if (is_suspended) {
return ResultInvalidState;
}
R_UNLESS(!is_suspended, ResultInvalidState);
// Suspend all threads.
for (auto* thread : GetThreadList()) {
@ -325,9 +325,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
ASSERT(activity == ProcessActivity::Runnable);
// Verify that we're suspended.
if (!is_suspended) {
return ResultInvalidState;
}
R_UNLESS(is_suspended, ResultInvalidState);
// Resume all threads.
for (auto* thread : GetThreadList()) {
@ -338,7 +336,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
SetSuspended(false);
}
return ResultSuccess;
R_SUCCEED();
}
Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
@ -348,35 +346,38 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
system_resource_size = metadata.GetSystemResourceSize();
image_size = code_size;
// We currently do not support process-specific system resources
UNIMPLEMENTED_IF(system_resource_size != 0);
KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
code_size + system_resource_size);
if (!memory_reservation.Succeeded()) {
LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
code_size + system_resource_size);
return ResultLimitReached;
R_RETURN(ResultLimitReached);
}
// Initialize process address space
if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false,
0x8000000, code_size,
KMemoryManager::Pool::Application)};
if (const Result result{page_table.InitializeForProcess(
metadata.GetAddressSpaceType(), false, 0x8000000, code_size,
&kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)};
result.IsError()) {
return result;
R_RETURN(result);
}
// Map process code region
if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
code_size / PageSize, KMemoryState::Code,
KMemoryPermission::None)};
if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(),
code_size / PageSize, KMemoryState::Code,
KMemoryPermission::None)};
result.IsError()) {
return result;
R_RETURN(result);
}
// Initialize process capabilities
const auto& caps{metadata.GetKernelCapabilities()};
if (const Result result{
capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)};
result.IsError()) {
return result;
R_RETURN(result);
}
// Set memory usage capacity
@ -384,12 +385,12 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
case FileSys::ProgramAddressSpaceType::Is32Bit:
case FileSys::ProgramAddressSpaceType::Is36Bit:
case FileSys::ProgramAddressSpaceType::Is39Bit:
memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart();
memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart();
break;
case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() +
page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart();
memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() +
page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart();
break;
default:
@ -397,10 +398,10 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
}
// Create TLS region
R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address)));
R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address)));
memory_reservation.Commit();
return handle_table.Initialize(capabilities.GetHandleTableSize());
R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize()));
}
void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
@ -409,15 +410,15 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError());
ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());
ChangeStatus(ProcessStatus::Running);
ChangeState(State::Running);
SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
}
void KProcess::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exiting);
ChangeState(State::Terminating);
const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
for (auto* thread : in_thread_list) {
@ -437,15 +438,15 @@ void KProcess::PrepareForTermination() {
stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
this->DeleteThreadLocalRegion(tls_region_address);
tls_region_address = 0;
this->DeleteThreadLocalRegion(plr_address);
plr_address = 0;
if (resource_limit) {
resource_limit->Release(LimitableResource::PhysicalMemory,
main_thread_stack_size + image_size);
}
ChangeStatus(ProcessStatus::Exited);
ChangeState(State::Terminated);
}
void KProcess::Finalize() {
@ -474,7 +475,7 @@ void KProcess::Finalize() {
}
// Finalize the page table.
page_table.reset();
page_table.Finalize();
// Perform inherited finalization.
KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
@ -499,7 +500,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
}
*out = tlr;
return ResultSuccess;
R_SUCCEED();
}
}
@ -528,7 +529,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
// We succeeded!
tlp_guard.Cancel();
*out = tlr;
return ResultSuccess;
R_SUCCEED();
}
Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
@ -576,7 +577,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
KThreadLocalPage::Free(kernel, page_to_free);
}
return ResultSuccess;
R_SUCCEED();
}
bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
@ -628,7 +629,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
Svc::MemoryPermission permission) {
page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
};
kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
@ -645,19 +646,18 @@ bool KProcess::IsSignaled() const {
}
KProcess::KProcess(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>(
kernel_.System())},
: KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
state_lock{kernel_}, list_lock{kernel_} {}
KProcess::~KProcess() = default;
void KProcess::ChangeStatus(ProcessStatus new_status) {
if (status == new_status) {
void KProcess::ChangeState(State new_state) {
if (state == new_state) {
return;
}
status = new_status;
state = new_state;
is_signaled = true;
NotifyAvailable();
}
@ -668,17 +668,17 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
// The kernel always ensures that the given stack size is page aligned.
main_thread_stack_size = Common::AlignUp(stack_size, PageSize);
const VAddr start{page_table->GetStackRegionStart()};
const std::size_t size{page_table->GetStackRegionEnd() - start};
const VAddr start{page_table.GetStackRegionStart()};
const std::size_t size{page_table.GetStackRegionEnd() - start};
CASCADE_RESULT(main_thread_stack_top,
page_table->AllocateAndMapMemory(
page_table.AllocateAndMapMemory(
main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
KMemoryState::Stack, KMemoryPermission::UserReadWrite));
main_thread_stack_top += main_thread_stack_size;
return ResultSuccess;
R_SUCCEED();
}
} // namespace Kernel

View File

@ -13,6 +13,7 @@
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_worker_task.h"
@ -31,7 +32,6 @@ class ProgramMetadata;
namespace Kernel {
class KernelCore;
class KPageTable;
class KResourceLimit;
class KThread;
class KSharedMemoryInfo;
@ -45,24 +45,6 @@ enum class MemoryRegion : u16 {
BASE = 3,
};
/**
* Indicates the status of a Process instance.
*
* @note These match the values as used by kernel,
* so new entries should only be added if RE
* shows that a new value has been introduced.
*/
enum class ProcessStatus {
Created,
CreatedWithDebuggerAttached,
Running,
WaitingForDebuggerToAttach,
DebuggerAttached,
Exiting,
Exited,
DebugBreak,
};
enum class ProcessActivity : u32 {
Runnable,
Paused,
@ -89,6 +71,17 @@ public:
explicit KProcess(KernelCore& kernel_);
~KProcess() override;
enum class State {
Created = Svc::ProcessState_Created,
CreatedAttached = Svc::ProcessState_CreatedAttached,
Running = Svc::ProcessState_Running,
Crashed = Svc::ProcessState_Crashed,
RunningAttached = Svc::ProcessState_RunningAttached,
Terminating = Svc::ProcessState_Terminating,
Terminated = Svc::ProcessState_Terminated,
DebugBreak = Svc::ProcessState_DebugBreak,
};
enum : u64 {
/// Lowest allowed process ID for a kernel initial process.
InitialKIPIDMin = 1,
@ -114,12 +107,12 @@ public:
/// Gets a reference to the process' page table.
KPageTable& PageTable() {
return *page_table;
return page_table;
}
/// Gets const a reference to the process' page table.
const KPageTable& PageTable() const {
return *page_table;
return page_table;
}
/// Gets a reference to the process' handle table.
@ -145,26 +138,25 @@ public:
}
Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
return condition_var.Wait(address, cv_key, tag, ns);
R_RETURN(condition_var.Wait(address, cv_key, tag, ns));
}
Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
return address_arbiter.SignalToAddress(address, signal_type, value, count);
R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count));
}
Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
s64 timeout) {
return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout));
}
/// Gets the address to the process' dedicated TLS region.
VAddr GetTLSRegionAddress() const {
return tls_region_address;
VAddr GetProcessLocalRegionAddress() const {
return plr_address;
}
/// Gets the current status of the process
ProcessStatus GetStatus() const {
return status;
State GetState() const {
return state;
}
/// Gets the unique ID that identifies this particular process.
@ -286,18 +278,18 @@ public:
}
/// Retrieves the total physical memory available to this process in bytes.
u64 GetTotalPhysicalMemoryAvailable() const;
u64 GetTotalPhysicalMemoryAvailable();
/// Retrieves the total physical memory available to this process in bytes,
/// without the size of the personal system resource heap added to it.
u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const;
u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();
/// Retrieves the total physical memory used by this process in bytes.
u64 GetTotalPhysicalMemoryUsed() const;
u64 GetTotalPhysicalMemoryUsed();
/// Retrieves the total physical memory used by this process in bytes,
/// without the size of the personal system resource heap added to it.
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();
/// Gets the list of all threads created with this process as their owner.
std::list<KThread*>& GetThreadList() {
@ -415,19 +407,24 @@ private:
pinned_threads[core_id] = nullptr;
}
/// Changes the process status. If the status is different
/// from the current process status, then this will trigger
/// a process signal.
void ChangeStatus(ProcessStatus new_status);
void FinalizeHandleTable() {
// Finalize the table.
handle_table.Finalize();
// Note that the table is finalized.
is_handle_table_initialized = false;
}
void ChangeState(State new_state);
/// Allocates the main thread stack for the process, given the stack size in bytes.
Result AllocateMainThreadStack(std::size_t stack_size);
/// Memory manager for this process
std::unique_ptr<KPageTable> page_table;
KPageTable page_table;
/// Current status of the process
ProcessStatus status{};
State state{};
/// The ID of this process
u64 process_id = 0;
@ -443,6 +440,8 @@ private:
/// Resource limit descriptor for this process
KResourceLimit* resource_limit{};
VAddr system_resource_address{};
/// The ideal CPU core for this process, threads are scheduled on this core by default.
u8 ideal_core = 0;
@ -469,7 +468,7 @@ private:
KConditionVariable condition_var;
/// Address indicating the location of the process' dedicated TLS region.
VAddr tls_region_address = 0;
VAddr plr_address = 0;
/// Random values for svcGetInfo RandomEntropy
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
@ -495,8 +494,12 @@ private:
/// Schedule count of this process
s64 schedule_count{};
size_t memory_release_hint{};
bool is_signaled{};
bool is_suspended{};
bool is_immortal{};
bool is_handle_table_initialized{};
bool is_initialized{};
std::atomic<u16> num_running_threads{};

View File

@ -50,7 +50,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
is_initialized = true;
// Clear all pages in the memory.
std::memset(device_memory_.GetPointer(physical_address_), 0, size_);
std::memset(device_memory_.GetPointer<void>(physical_address_), 0, size_);
return ResultSuccess;
}

View File

@ -54,7 +54,7 @@ public:
* @return A pointer to the shared memory block from the specified offset
*/
u8* GetPointer(std::size_t offset = 0) {
return device_memory->GetPointer(physical_address + offset);
return device_memory->GetPointer<u8>(physical_address + offset);
}
/**
@ -63,7 +63,7 @@ public:
* @return A pointer to the shared memory block from the specified offset
*/
const u8* GetPointer(std::size_t offset = 0) const {
return device_memory->GetPointer(physical_address + offset);
return device_memory->GetPointer<u8>(physical_address + offset);
}
void Finalize() override;

View File

@ -30,6 +30,7 @@
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
#include "core/memory.h"
@ -38,6 +39,9 @@
#endif
namespace {
constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1;
static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
u32 entry_point, u32 arg) {
context = {};
@ -241,7 +245,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
}
}
return ResultSuccess;
R_SUCCEED();
}
Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
@ -254,7 +258,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
thread->is_single_core = !Settings::values.use_multi_core.GetValue();
return ResultSuccess;
R_SUCCEED();
}
Result KThread::InitializeDummyThread(KThread* thread) {
@ -264,31 +268,32 @@ Result KThread::InitializeDummyThread(KThread* thread) {
// Initialize emulation parameters.
thread->stack_parameters.disable_count = 0;
return ResultSuccess;
R_SUCCEED();
}
Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
system.GetCpuManager().GetGuestActivateFunc());
R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc()));
}
Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
system.GetCpuManager().GetIdleThreadStartFunc());
R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc()));
}
Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
KThreadFunction func, uintptr_t arg, s32 virt_core) {
return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
system.GetCpuManager().GetShutdownThreadStartFunc());
R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr,
ThreadType::HighPriority,
system.GetCpuManager().GetShutdownThreadStartFunc()));
}
Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
KProcess* owner) {
system.Kernel().GlobalSchedulerContext().AddThread(thread);
return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
ThreadType::User, system.GetCpuManager().GetGuestThreadFunc());
R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
}
void KThread::PostDestroy(uintptr_t arg) {
@ -538,7 +543,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
*out_ideal_core = virtual_ideal_core_id;
*out_affinity_mask = virtual_affinity_mask;
return ResultSuccess;
R_SUCCEED();
}
Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
@ -554,7 +559,7 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask)
*out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
}
return ResultSuccess;
R_SUCCEED();
}
Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
@ -666,7 +671,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
} while (retry_update);
}
return ResultSuccess;
R_SUCCEED();
}
void KThread::SetBasePriority(s32 value) {
@ -839,7 +844,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
} while (thread_is_current);
}
return ResultSuccess;
R_SUCCEED();
}
Result KThread::GetThreadContext3(std::vector<u8>& out) {
@ -874,7 +879,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
}
}
return ResultSuccess;
R_SUCCEED();
}
void KThread::AddWaiterImpl(KThread* thread) {
@ -1038,7 +1043,7 @@ Result KThread::Run() {
// Set our state and finish.
SetState(ThreadState::Runnable);
return ResultSuccess;
R_SUCCEED();
}
}
@ -1073,6 +1078,78 @@ void KThread::Exit() {
UNREACHABLE_MSG("KThread::Exit() would return");
}
Result KThread::Terminate() {
ASSERT(this != GetCurrentThreadPointer(kernel));
// Request the thread terminate if it hasn't already.
if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
// If the thread isn't terminated, wait for it to terminate.
s32 index;
KSynchronizationObject* objects[] = {this};
R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1,
Svc::WaitInfinite));
}
R_SUCCEED();
}
ThreadState KThread::RequestTerminate() {
ASSERT(this != GetCurrentThreadPointer(kernel));
KScopedSchedulerLock sl{kernel};
// Determine if this is the first termination request.
const bool first_request = [&]() -> bool {
// Perform an atomic compare-and-swap from false to true.
bool expected = false;
return termination_requested.compare_exchange_strong(expected, true);
}();
// If this is the first request, start termination procedure.
if (first_request) {
// If the thread is in initialized state, just change state to terminated.
if (this->GetState() == ThreadState::Initialized) {
thread_state = ThreadState::Terminated;
return ThreadState::Terminated;
}
// Register the terminating dpc.
this->RegisterDpc(DpcFlag::Terminating);
// If the thread is pinned, unpin it.
if (this->GetStackParameters().is_pinned) {
this->GetOwnerProcess()->UnpinThread(this);
}
// If the thread is suspended, continue it.
if (this->IsSuspended()) {
suspend_allowed_flags = 0;
this->UpdateState();
}
// Change the thread's priority to be higher than any system thread's.
if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) {
this->SetBasePriority(TerminatingThreadPriority);
}
// If the thread is runnable, send a termination interrupt to other cores.
if (this->GetState() == ThreadState::Runnable) {
if (const u64 core_mask =
physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel));
core_mask != 0) {
Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask);
}
}
// Wake up the thread.
if (this->GetState() == ThreadState::Waiting) {
wait_queue->CancelWait(this, ResultTerminationRequested, true);
}
}
return this->GetState();
}
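
RequestTerminate above decides "who runs the termination procedure" with a single atomic compare-and-swap: only the caller that flips the flag from false to true performs the full sequence, and every later caller simply observes the resulting state. A standalone sketch of that idiom:

#include <atomic>

std::atomic<bool> termination_requested{false};

// Only the caller that flips false -> true observes 'true' here; every
// later caller gets 'false' and can skip straight to reading the state.
bool IsFirstTerminationRequest() {
    bool expected = false;
    return termination_requested.compare_exchange_strong(expected, true);
}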
Result KThread::Sleep(s64 timeout) {
ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
ASSERT(this == GetCurrentThreadPointer(kernel));
@ -1086,7 +1163,7 @@ Result KThread::Sleep(s64 timeout) {
// Check if the thread should terminate.
if (this->IsTerminationRequested()) {
slp.CancelSleep();
return ResultTerminationRequested;
R_THROW(ResultTerminationRequested);
}
// Wait for the sleep to end.
@ -1094,7 +1171,7 @@ Result KThread::Sleep(s64 timeout) {
SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
}
return ResultSuccess;
R_SUCCEED();
}
void KThread::IfDummyThreadTryWait() {

View File

@ -180,6 +180,10 @@ public:
void Exit();
Result Terminate();
ThreadState RequestTerminate();
[[nodiscard]] u32 GetSuspendFlags() const {
return suspend_allowed_flags & suspend_request_flags;
}

View File

@ -24,6 +24,7 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
@ -73,8 +74,16 @@ struct KernelCore::Impl {
InitializeMemoryLayout();
Init::InitializeKPageBufferSlabHeap(system);
InitializeShutdownThreads();
InitializePreemption(kernel);
InitializePhysicalCores();
InitializePreemption(kernel);
// Initialize the Dynamic Slab Heaps.
{
const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion();
ASSERT(pt_heap_region.GetEndAddress() != 0);
InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
}
RegisterHostThread();
}
@ -86,6 +95,15 @@ struct KernelCore::Impl {
}
}
void CloseCurrentProcess() {
(*current_process).Finalize();
// current_process->Close();
// TODO: The current process should be destroyed based on accurate ref counting after
// calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
(*current_process).Destroy();
current_process = nullptr;
}
void Shutdown() {
is_shutting_down.store(true, std::memory_order_relaxed);
SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
@ -99,10 +117,6 @@ struct KernelCore::Impl {
next_user_process_id = KProcess::ProcessIDMin;
next_thread_id = 1;
for (auto& core : cores) {
core = nullptr;
}
global_handle_table->Finalize();
global_handle_table.reset();
@ -152,15 +166,7 @@ struct KernelCore::Impl {
}
}
// Shutdown all processes.
if (current_process) {
(*current_process).Finalize();
// current_process->Close();
// TODO: The current process should be destroyed based on accurate ref counting after
// calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
(*current_process).Destroy();
current_process = nullptr;
}
CloseCurrentProcess();
// Track kernel objects that were not freed on shutdown
{
@ -257,6 +263,18 @@ struct KernelCore::Impl {
system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
}
void InitializeResourceManagers(VAddr address, size_t size) {
dynamic_page_manager = std::make_unique<KDynamicPageManager>();
memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();
dynamic_page_manager->Initialize(address, size);
static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
memory_block_heap->Initialize(dynamic_page_manager.get(),
ApplicationMemoryBlockSlabHeapSize);
app_memory_block_manager->Initialize(nullptr, memory_block_heap.get());
}
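
The initialization order above is the interesting part: the page-level allocator comes first, the typed slab heap is then carved out of it (20000 memory blocks for the application region), and the slab manager that callers use merely fronts the heap. A simplified standalone sketch of that layering (these types are stand-ins for KDynamicPageManager, KMemoryBlockSlabHeap, and KMemoryBlockSlabManager, not the real classes):

#include <cstddef>
#include <vector>

struct PageManager {  // stand-in for KDynamicPageManager
    std::vector<unsigned char> backing;
    void Initialize(std::size_t bytes) { backing.resize(bytes); }
};

template <typename T>
struct SlabHeap {  // stand-in for the typed slab heap
    PageManager* pages{};
    std::size_t capacity{};
    void Initialize(PageManager* pm, std::size_t count) {
        pages = pm;
        capacity = count;
    }
};

template <typename T>
struct SlabManager {  // stand-in for the manager callers allocate through
    SlabHeap<T>* heap{};
    void Initialize(SlabHeap<T>* h) { heap = h; }
};

struct MemoryBlock {};

int main() {
    PageManager page_manager;
    SlabHeap<MemoryBlock> block_heap;
    SlabManager<MemoryBlock> block_manager;

    page_manager.Initialize(1 << 20);             // backing pages first
    block_heap.Initialize(&page_manager, 20000);  // then the typed heap on top
    block_manager.Initialize(&block_heap);        // finally the front-end manager
}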
void InitializeShutdownThreads() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
shutdown_threads[core_id] = KThread::Create(system.Kernel());
@ -344,11 +362,6 @@ struct KernelCore::Impl {
static inline thread_local KThread* current_thread{nullptr};
KThread* GetCurrentEmuThread() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (IsShuttingDown()) {
return {};
}
const auto thread_id = GetCurrentHostThreadID();
if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
return GetHostDummyThread();
@ -770,6 +783,11 @@ struct KernelCore::Impl {
// Kernel memory management
std::unique_ptr<KMemoryManager> memory_manager;
// Dynamic slab managers
std::unique_ptr<KDynamicPageManager> dynamic_page_manager;
std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap;
std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager;
// Shared memory for services
Kernel::KSharedMemory* hid_shared_mem{};
Kernel::KSharedMemory* font_shared_mem{};
@ -853,6 +871,10 @@ const KProcess* KernelCore::CurrentProcess() const {
return impl->current_process;
}
void KernelCore::CloseCurrentProcess() {
impl->CloseCurrentProcess();
}
const std::vector<KProcess*>& KernelCore::GetProcessList() const {
return impl->process_list;
}
@ -1041,6 +1063,14 @@ const KMemoryManager& KernelCore::MemoryManager() const {
return *impl->memory_manager;
}
KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() {
return *impl->app_memory_block_manager;
}
const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const {
return *impl->app_memory_block_manager;
}
Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
return *impl->hid_shared_mem;
}

View File

@ -37,6 +37,7 @@ class KClientSession;
class KEvent;
class KHandleTable;
class KLinkedListNode;
class KMemoryBlockSlabManager;
class KMemoryLayout;
class KMemoryManager;
class KPageBuffer;
@ -130,6 +131,9 @@ public:
/// Retrieves a const pointer to the current process.
const KProcess* CurrentProcess() const;
/// Closes the current process.
void CloseCurrentProcess();
/// Retrieves the list of processes.
const std::vector<KProcess*>& GetProcessList() const;
@ -238,6 +242,12 @@ public:
/// Gets the virtual memory manager for the kernel.
const KMemoryManager& MemoryManager() const;
/// Gets the application memory block manager for the kernel.
KMemoryBlockSlabManager& GetApplicationMemoryBlockManager();
/// Gets the application memory block manager for the kernel.
const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const;
/// Gets the shared memory object for HID services.
Kernel::KSharedMemory& GetHidSharedMem();

View File

@ -933,7 +933,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han
return ResultSuccess;
case GetInfoType::UserExceptionContextAddr:
*result = process->GetTLSRegionAddress();
*result = process->GetProcessLocalRegionAddress();
return ResultSuccess;
case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
@ -1888,7 +1888,7 @@ static void ExitProcess(Core::System& system) {
auto* current_process = system.Kernel().CurrentProcess();
LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
"Process has already exited");
system.Exit();
@ -2557,7 +2557,7 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand
return ResultInvalidEnumValue;
}
*out = static_cast<u64>(process->GetStatus());
*out = static_cast<u64>(process->GetState());
return ResultSuccess;
}

View File

@ -14,8 +14,13 @@ namespace Kernel::Svc {
using namespace Common::Literals;
constexpr s32 ArgumentHandleCountMax = 0x40;
constexpr u32 HandleWaitMask{1u << 30};
enum {
HandleWaitMask = (1u << 30),
};
constexpr inline s32 ArgumentHandleCountMax = 0x40;
constexpr inline s64 WaitInfinite = -1;
constexpr inline std::size_t HeapSizeAlignment = 2_MiB;
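Moving HandleWaitMask into an enum does not change its meaning: it is the bit that synchronization SVCs (roughly, the condition-variable paths) fold into an owner handle to mark that other threads are waiting on it, with bit 30 chosen because valid handle values never reach it. A small illustration of the tag round-trip; the helper names are invented for the example:

    #include <cstdint>

    using u32 = std::uint32_t;
    constexpr u32 HandleWaitMask = 1u << 30;

    // Mark an owner handle as having at least one waiter.
    constexpr u32 TagWithWaiters(u32 handle) { return handle | HandleWaitMask; }
    // Recover the plain handle and the waiter flag from a tagged value.
    constexpr u32 UntaggedHandle(u32 tag) { return tag & ~HandleWaitMask; }
    constexpr bool HasWaiters(u32 tag) { return (tag & HandleWaitMask) != 0; }

    static_assert(UntaggedHandle(TagWithWaiters(0x1234)) == 0x1234);
    static_assert(HasWaiters(TagWithWaiters(0x1234)));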

View File

@ -95,6 +95,19 @@ constexpr inline s32 IdealCoreNoUpdate = -3;
constexpr inline s32 LowestThreadPriority = 63;
constexpr inline s32 HighestThreadPriority = 0;
constexpr inline s32 SystemThreadPriorityHighest = 16;
enum ProcessState : u32 {
ProcessState_Created = 0,
ProcessState_CreatedAttached = 1,
ProcessState_Running = 2,
ProcessState_Crashed = 3,
ProcessState_RunningAttached = 4,
ProcessState_Terminating = 5,
ProcessState_Terminated = 6,
ProcessState_DebugBreak = 7,
};
constexpr inline size_t ThreadLocalRegionSize = 0x200;
} // namespace Kernel::Svc
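These are the guest-facing values that GetProcessInfo reports once it casts the process state to u64, so their numeric positions matter. A quick compile-time check, assuming KProcess::State mirrors this enumeration:

    // Sanity check: the guest-visible encodings match the values above.
    static_assert(static_cast<u32>(ProcessState_Created) == 0);
    static_assert(static_cast<u32>(ProcessState_Running) == 2);
    static_assert(static_cast<u32>(ProcessState_Terminated) == 6);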

View File

@ -135,6 +135,14 @@ union Result {
[[nodiscard]] constexpr bool IsFailure() const {
return !IsSuccess();
}
[[nodiscard]] constexpr u32 GetInnerValue() const {
return static_cast<u32>(module.Value()) | (description << module.bits);
}
[[nodiscard]] constexpr bool Includes(Result result) const {
return GetInnerValue() == result.GetInnerValue();
}
};
static_assert(std::is_trivial_v<Result>);
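GetInnerValue packs the module and description back into one integer (module in the low bits, description shifted up past it), and Includes compares two results by that packed value. Assuming the 9-bit module field this union declares elsewhere, a worked example:

    // Worked example, assuming module occupies bits 0-8 (module.bits == 9).
    constexpr u32 module_value = 1;  // e.g. an error module id
    constexpr u32 description = 59;  // an arbitrary description
    constexpr u32 inner = module_value | (description << 9);
    static_assert(inner == 30209);   // 1 + 59 * 512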
@ -462,9 +470,6 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess;
#define R_UNLESS(expr, res) \
{ \
if (!(expr)) { \
if (res.IsError()) { \
LOG_ERROR(Kernel, "Failed with result: {}", res.raw); \
} \
R_THROW(res); \
} \
}
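With the error logging dropped, R_UNLESS is a pure guard again: evaluate the condition and, on failure, propagate the given result through R_THROW. A typical call-site shape, with illustrative result codes:

    Result MapBuffer(void* address, std::size_t size) {
        // Bail out with the stated result when a precondition fails.
        R_UNLESS(address != nullptr, ResultInvalidAddress);
        R_UNLESS(size > 0, ResultInvalidSize);
        // ... perform the mapping ...
        return ResultSuccess;
    }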

View File

@ -290,7 +290,7 @@ public:
const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
const auto start_info{page_table.QueryInfo(start - 1)};
if (start_info.state != Kernel::KMemoryState::Free) {
if (start_info.GetState() != Kernel::KMemoryState::Free) {
return {};
}
@ -300,7 +300,7 @@ public:
const auto end_info{page_table.QueryInfo(start + size)};
if (end_info.state != Kernel::KMemoryState::Free) {
if (end_info.GetState() != Kernel::KMemoryState::Free) {
return {};
}
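The .state to GetState() change reflects KMemoryInfo moving from public members to accessors; callers of QueryInfo now read the block's state through a getter. The interface shape being assumed, reduced to a sketch:

    // Sketch of the accessor-style interface the call sites now expect.
    class MemoryInfoSketch {
    public:
        Kernel::KMemoryState GetState() const { return state; }
    private:
        Kernel::KMemoryState state{};
    };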

View File

@ -128,7 +128,8 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
}
ASSERT(system.CurrentProcess()
->PageTable()
.LockForDeviceAddressSpace(handle_description->address, handle_description->size)
.LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
Kernel::KMemoryPermission::None, true)
.IsSuccess());
std::memcpy(output.data(), &params, sizeof(params));
return result;

View File

@ -65,7 +65,7 @@ struct Memory::Impl {
return {};
}
return system.DeviceMemory().GetPointer(paddr) + vaddr;
return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
}
[[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
@ -75,7 +75,7 @@ struct Memory::Impl {
return {};
}
return system.DeviceMemory().GetPointer(paddr) + vaddr;
return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
}
u8 Read8(const VAddr addr) {
@ -499,7 +499,7 @@ struct Memory::Impl {
} else {
while (base != end) {
page_table.pointers[base].Store(
system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type);
system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type);
page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);
ASSERT_MSG(page_table.pointers[base].Pointer(),
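DeviceMemory::GetPointer is now a template, so each caller names the pointee type instead of receiving a bare u8*; the <u8> instantiations above preserve the old byte arithmetic. A simplified sketch of such a typed accessor, not the real signature:

    #include <cstdint>

    class DeviceMemorySketch {
    public:
        // Typed view into the backing buffer at a physical offset.
        template <typename T>
        T* GetPointer(std::uint64_t paddr) {
            return reinterpret_cast<T*>(buffer + paddr);
        }
    private:
        std::uint8_t* buffer{};
    };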

View File

@ -137,28 +137,35 @@ bool IsLegacyAttribute(IR::Attribute attribute) {
}
std::map<IR::Attribute, IR::Attribute> GenerateLegacyToGenericMappings(
const VaryingState& state, std::queue<IR::Attribute> ununsed_generics) {
const VaryingState& state, std::queue<IR::Attribute> unused_generics,
const std::map<IR::Attribute, IR::Attribute>& previous_stage_mapping) {
std::map<IR::Attribute, IR::Attribute> mapping;
auto update_mapping = [&mapping, &unused_generics, previous_stage_mapping](IR::Attribute attr,
size_t count) {
if (previous_stage_mapping.find(attr) != previous_stage_mapping.end()) {
for (size_t i = 0; i < count; ++i) {
mapping.insert({attr + i, previous_stage_mapping.at(attr + i)});
}
} else {
for (size_t i = 0; i < count; ++i) {
mapping.insert({attr + i, unused_generics.front() + i});
}
unused_generics.pop();
}
};
for (size_t index = 0; index < 4; ++index) {
auto attr = IR::Attribute::ColorFrontDiffuseR + index * 4;
if (state.AnyComponent(attr)) {
for (size_t i = 0; i < 4; ++i) {
mapping.insert({attr + i, ununsed_generics.front() + i});
}
ununsed_generics.pop();
update_mapping(attr, 4);
}
}
if (state[IR::Attribute::FogCoordinate]) {
mapping.insert({IR::Attribute::FogCoordinate, ununsed_generics.front()});
ununsed_generics.pop();
update_mapping(IR::Attribute::FogCoordinate, 1);
}
for (size_t index = 0; index < IR::NUM_FIXEDFNCTEXTURE; ++index) {
auto attr = IR::Attribute::FixedFncTexture0S + index * 4;
if (state.AnyComponent(attr)) {
for (size_t i = 0; i < 4; ++i) {
mapping.insert({attr + i, ununsed_generics.front() + i});
}
ununsed_generics.pop();
update_mapping(attr, 4);
}
}
return mapping;
@ -271,15 +278,16 @@ void ConvertLegacyToGeneric(IR::Program& program, const Shader::RuntimeInfo& run
ununsed_output_generics.push(IR::Attribute::Generic0X + index * 4);
}
}
auto mappings = GenerateLegacyToGenericMappings(stores, ununsed_output_generics);
program.info.legacy_stores_mapping =
GenerateLegacyToGenericMappings(stores, ununsed_output_generics, {});
for (IR::Block* const block : program.post_order_blocks) {
for (IR::Inst& inst : block->Instructions()) {
switch (inst.GetOpcode()) {
case IR::Opcode::SetAttribute: {
const auto attr = inst.Arg(0).Attribute();
if (IsLegacyAttribute(attr)) {
stores.Set(mappings[attr], true);
inst.SetArg(0, Shader::IR::Value(mappings[attr]));
stores.Set(program.info.legacy_stores_mapping[attr], true);
inst.SetArg(0, Shader::IR::Value(program.info.legacy_stores_mapping[attr]));
}
break;
}
@ -300,7 +308,8 @@ void ConvertLegacyToGeneric(IR::Program& program, const Shader::RuntimeInfo& run
ununsed_input_generics.push(IR::Attribute::Generic0X + index * 4);
}
}
auto mappings = GenerateLegacyToGenericMappings(loads, ununsed_input_generics);
auto mappings = GenerateLegacyToGenericMappings(
loads, ununsed_input_generics, runtime_info.previous_stage_legacy_stores_mapping);
for (IR::Block* const block : program.post_order_blocks) {
for (IR::Inst& inst : block->Instructions()) {
switch (inst.GetOpcode()) {
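The new update_mapping helper changes the allocation policy: when the previous stage already assigned a generic slot to a legacy attribute, this stage reuses that slot, and only otherwise does it take a fresh generic from the unused queue. The same decision in isolation, using plain STL with int standing in for IR::Attribute:

    #include <cstdio>
    #include <map>
    #include <queue>

    int main() {
        std::map<int, int> previous_stage{{100, 0}}; // legacy attr 100 already mapped to generic 0
        std::queue<int> unused;
        unused.push(4); // next free generic slot

        std::map<int, int> mapping;
        const auto update_mapping = [&](int attr) {
            if (const auto it = previous_stage.find(attr); it != previous_stage.end()) {
                mapping.emplace(attr, it->second); // reuse the producer's slot
            } else {
                mapping.emplace(attr, unused.front()); // take a fresh generic
                unused.pop();
            }
        };

        update_mapping(100); // reused -> 0
        update_mapping(200); // fresh  -> 4
        std::printf("%d %d\n", mapping.at(100), mapping.at(200)); // prints "0 4"
        return 0;
    }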

View File

@ -4,6 +4,7 @@
#pragma once
#include <array>
#include <map>
#include <optional>
#include <vector>
@ -60,6 +61,7 @@ struct TransformFeedbackVarying {
struct RuntimeInfo {
std::array<AttributeType, 32> generic_input_types{};
VaryingState previous_stage_stores;
std::map<IR::Attribute, IR::Attribute> previous_stage_legacy_stores_mapping;
bool convert_depth_mode{};
bool force_early_z{};

View File

@ -5,6 +5,7 @@
#include <array>
#include <bitset>
#include <map>
#include "common/common_types.h"
#include "shader_recompiler/frontend/ir/type.h"
@ -127,6 +128,8 @@ struct Info {
VaryingState stores;
VaryingState passthrough;
std::map<IR::Attribute, IR::Attribute> legacy_stores_mapping;
bool loads_indexed_attributes{};
std::array<bool, 8> stores_frag_color{};

View File

@ -40,9 +40,6 @@ struct ScopeInit final {
core_timing.SetMulticore(true);
core_timing.Initialize([]() {});
}
~ScopeInit() {
core_timing.Shutdown();
}
Core::Timing::CoreTiming core_timing;
};
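Removing the explicit ~ScopeInit only holds up if CoreTiming now shuts itself down on destruction; under that assumption the fixture relies on the ordinary RAII shape:

    // RAII shape the test fixture now depends on (assumed, not verified here):
    class CoreTimingSketch {
    public:
        ~CoreTimingSketch() {
            Shutdown(); // presumed to stop the timing thread and drain events
        }
        void Shutdown();
    };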

View File

@ -63,6 +63,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
Shader::RuntimeInfo info;
if (previous_program) {
info.previous_stage_stores = previous_program->info.stores;
info.previous_stage_legacy_stores_mapping = previous_program->info.legacy_stores_mapping;
} else {
// Mark all stores as available for vertex shaders
info.previous_stage_stores.mask.set();

View File

@ -134,6 +134,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
Shader::RuntimeInfo info;
if (previous_program) {
info.previous_stage_stores = previous_program->info.stores;
info.previous_stage_legacy_stores_mapping = previous_program->info.legacy_stores_mapping;
if (previous_program->is_geometry_passthrough) {
info.previous_stage_stores.mask |= previous_program->info.passthrough.mask;
}

View File

@ -59,10 +59,11 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) {
return query_pool == *pool;
});
ASSERT(it != std::end(pools));
const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
if (it != std::end(pools)) {
const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
}
}
QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,

View File

@ -120,8 +120,8 @@ void EmuThread::run() {
}
}
// Shutdown the core emulation
system.Shutdown();
// Shutdown the main emulated process
system.ShutdownMainProcess();
#if MICROPROFILE_ENABLED
MicroProfileOnThreadExit();

View File

@ -294,6 +294,7 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan
#ifdef __linux__
SetupSigInterrupts();
#endif
system->Initialize();
Common::Log::Initialize();
LoadTranslation();

View File

@ -302,6 +302,8 @@ int main(int argc, char** argv) {
}
Core::System system{};
system.Initialize();
InputCommon::InputSubsystem input_subsystem{};
// Apply the command line arguments
@ -392,7 +394,7 @@ int main(int argc, char** argv) {
}
system.DetachDebugger();
void(system.Pause());
system.Shutdown();
system.ShutdownMainProcess();
detached_tasks.WaitForAllTasks();
return 0;
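Taken together, the frontend changes split the old all-in-one startup and teardown: construct the System, run Initialize() once up front, and at exit tear down only the emulated program via ShutdownMainProcess() instead of the whole system. Condensed from the call sites above:

    Core::System system{};
    system.Initialize();          // one-time host-side setup

    // ... load and run a game ...

    system.DetachDebugger();
    void(system.Pause());
    system.ShutdownMainProcess(); // ends the emulated process only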