early-access version 3039

This commit is contained in:
pineappleEA 2022-10-20 00:52:26 +02:00
parent a9b25068d9
commit b070369939
45 changed files with 2204 additions and 1371 deletions

View File

@ -1,7 +1,7 @@
yuzu emulator early access yuzu emulator early access
============= =============
This is the source code for early-access 3038. This is the source code for early-access 3039.
## Legal Notice ## Legal Notice

View File

@ -190,6 +190,9 @@ add_library(core STATIC
hle/kernel/k_code_memory.h hle/kernel/k_code_memory.h
hle/kernel/k_condition_variable.cpp hle/kernel/k_condition_variable.cpp
hle/kernel/k_condition_variable.h hle/kernel/k_condition_variable.h
hle/kernel/k_dynamic_page_manager.h
hle/kernel/k_dynamic_resource_manager.h
hle/kernel/k_dynamic_slab_heap.h
hle/kernel/k_event.cpp hle/kernel/k_event.cpp
hle/kernel/k_event.h hle/kernel/k_event.h
hle/kernel/k_handle_table.cpp hle/kernel/k_handle_table.cpp

View File

@ -134,6 +134,14 @@ void ARM_Interface::Run() {
} }
system.ExitDynarmicProfile(); system.ExitDynarmicProfile();
// If the thread is scheduled for termination, exit the thread.
if (current_thread->HasDpc()) {
if (current_thread->IsTerminationRequested()) {
current_thread->Exit();
UNREACHABLE();
}
}
// Notify the debugger and go to sleep if a breakpoint was hit, // Notify the debugger and go to sleep if a breakpoint was hit,
// or if the thread is unable to continue for any reason. // or if the thread is unable to continue for any reason.
if (Has(hr, breakpoint) || Has(hr, no_execute)) { if (Has(hr, breakpoint) || Has(hr, no_execute)) {
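
A note on the added termination check: after each JIT run slice, the core loop now exits threads that were flagged for termination while they were executing. A minimal standalone sketch of that pattern, using hypothetical Thread/RunSlice stand-ins rather than yuzu's actual classes:

    #include <atomic>
    #include <cstdio>
    #include <cstdlib>

    // Hypothetical stand-in for a guest thread carrying a deferred-procedure-call
    // (DPC) flag and a termination request.
    struct Thread {
        std::atomic<bool> has_dpc{false};
        std::atomic<bool> termination_requested{false};

        bool HasDpc() const { return has_dpc.load(); }
        bool IsTerminationRequested() const { return termination_requested.load(); }
        [[noreturn]] void Exit() {
            std::puts("thread exiting");
            std::exit(0); // in the emulator this would unwind into the scheduler instead
        }
    };

    void RunSlice(Thread& t) {
        // ... execute one slice of guest code here ...
        // Cheap flag test first; only inspect the specific request if a DPC is pending.
        if (t.HasDpc() && t.IsTerminationRequested()) {
            t.Exit(); // never returns
        }
    }
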

View File

@ -133,6 +133,50 @@ struct System::Impl {
: kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{}, : kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{},
cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {} cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {}
void Initialize(System& system) {
device_memory = std::make_unique<Core::DeviceMemory>();
is_multicore = Settings::values.use_multi_core.GetValue();
core_timing.SetMulticore(is_multicore);
core_timing.Initialize([&system]() { system.RegisterHostThread(); });
const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
const auto current_time =
std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
Settings::values.custom_rtc_differential =
Settings::values.custom_rtc.value_or(current_time) - current_time;
// Create a default fs if one doesn't already exist.
if (virtual_filesystem == nullptr) {
virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
}
if (content_provider == nullptr) {
content_provider = std::make_unique<FileSys::ContentProviderUnion>();
}
// Create default implementations of applets if one is not provided.
applet_manager.SetDefaultAppletsIfMissing();
is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
kernel.SetMulticore(is_multicore);
cpu_manager.SetMulticore(is_multicore);
cpu_manager.SetAsyncGpu(is_async_gpu);
}
void ReinitializeIfNecessary(System& system) {
if (is_multicore == Settings::values.use_multi_core.GetValue()) {
return;
}
LOG_DEBUG(Kernel, "Re-initializing");
is_multicore = Settings::values.use_multi_core.GetValue();
Initialize(system);
}
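
The guard above makes Initialize() safe to run more than once: a second call happens only when the cached multicore flag no longer matches the current setting. A reduced sketch of the same guard, with a hypothetical Settings struct standing in for Settings::values:

    #include <iostream>

    struct Settings {
        bool use_multi_core = true; // hypothetical stand-in for Settings::values
    };

    class Impl {
    public:
        void Initialize(const Settings& s) {
            is_multicore = s.use_multi_core;
            // ... recreate device memory, core timing, etc. ...
            std::cout << "initialized, multicore=" << is_multicore << '\n';
        }

        void ReinitializeIfNecessary(const Settings& s) {
            if (is_multicore == s.use_multi_core) {
                return; // setting unchanged; keep the current state
            }
            Initialize(s); // flipping CPU mode requires a full re-init
        }

    private:
        bool is_multicore = false;
    };
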
SystemResultStatus Run() { SystemResultStatus Run() {
std::unique_lock<std::mutex> lk(suspend_guard); std::unique_lock<std::mutex> lk(suspend_guard);
status = SystemResultStatus::Success; status = SystemResultStatus::Success;
@ -178,37 +222,14 @@ struct System::Impl {
debugger = std::make_unique<Debugger>(system, port); debugger = std::make_unique<Debugger>(system, port);
} }
SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) { SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) {
LOG_DEBUG(Core, "initialized OK"); LOG_DEBUG(Core, "initialized OK");
device_memory = std::make_unique<Core::DeviceMemory>(); // Setting changes may require a full system reinitialization (e.g., disabling multicore).
ReinitializeIfNecessary(system);
is_multicore = Settings::values.use_multi_core.GetValue();
is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
kernel.SetMulticore(is_multicore);
cpu_manager.SetMulticore(is_multicore);
cpu_manager.SetAsyncGpu(is_async_gpu);
core_timing.SetMulticore(is_multicore);
kernel.Initialize(); kernel.Initialize();
cpu_manager.Initialize(); cpu_manager.Initialize();
core_timing.Initialize([&system]() { system.RegisterHostThread(); });
const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
const auto current_time =
std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
Settings::values.custom_rtc_differential =
Settings::values.custom_rtc.value_or(current_time) - current_time;
// Create a default fs if one doesn't already exist.
if (virtual_filesystem == nullptr)
virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
if (content_provider == nullptr)
content_provider = std::make_unique<FileSys::ContentProviderUnion>();
/// Create default implementations of applets if one is not provided.
applet_manager.SetDefaultAppletsIfMissing();
/// Reset all glue registrations /// Reset all glue registrations
arp_manager.ResetAll(); arp_manager.ResetAll();
@ -253,11 +274,11 @@ struct System::Impl {
return SystemResultStatus::ErrorGetLoader; return SystemResultStatus::ErrorGetLoader;
} }
SystemResultStatus init_result{Init(system, emu_window)}; SystemResultStatus init_result{SetupForMainProcess(system, emu_window)};
if (init_result != SystemResultStatus::Success) { if (init_result != SystemResultStatus::Success) {
LOG_CRITICAL(Core, "Failed to initialize system (Error {})!", LOG_CRITICAL(Core, "Failed to initialize system (Error {})!",
static_cast<int>(init_result)); static_cast<int>(init_result));
Shutdown(); ShutdownMainProcess();
return init_result; return init_result;
} }
@ -276,7 +297,7 @@ struct System::Impl {
const auto [load_result, load_parameters] = app_loader->Load(*main_process, system); const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
if (load_result != Loader::ResultStatus::Success) { if (load_result != Loader::ResultStatus::Success) {
LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result); LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
Shutdown(); ShutdownMainProcess();
return static_cast<SystemResultStatus>( return static_cast<SystemResultStatus>(
static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result)); static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result));
@ -335,7 +356,7 @@ struct System::Impl {
return status; return status;
} }
void Shutdown() { void ShutdownMainProcess() {
SetShuttingDown(true); SetShuttingDown(true);
// Log last frame performance stats if game was loaded
@ -369,7 +390,7 @@ struct System::Impl {
cheat_engine.reset(); cheat_engine.reset();
telemetry_session.reset(); telemetry_session.reset();
time_manager.Shutdown(); time_manager.Shutdown();
core_timing.Shutdown(); core_timing.ClearPendingEvents();
app_loader.reset(); app_loader.reset();
audio_core.reset(); audio_core.reset();
gpu_core.reset(); gpu_core.reset();
@ -377,7 +398,6 @@ struct System::Impl {
perf_stats.reset(); perf_stats.reset();
kernel.Shutdown(); kernel.Shutdown();
memory.Reset(); memory.Reset();
applet_manager.ClearAll();
if (auto room_member = room_network.GetRoomMember().lock()) { if (auto room_member = room_network.GetRoomMember().lock()) {
Network::GameInfo game_info{}; Network::GameInfo game_info{};
@ -520,6 +540,10 @@ const CpuManager& System::GetCpuManager() const {
return impl->cpu_manager; return impl->cpu_manager;
} }
void System::Initialize() {
impl->Initialize(*this);
}
SystemResultStatus System::Run() { SystemResultStatus System::Run() {
return impl->Run(); return impl->Run();
} }
@ -540,8 +564,8 @@ void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
impl->kernel.InvalidateCpuInstructionCacheRange(addr, size); impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
} }
void System::Shutdown() { void System::ShutdownMainProcess() {
impl->Shutdown(); impl->ShutdownMainProcess();
} }
bool System::IsShuttingDown() const { bool System::IsShuttingDown() const {

View File

@ -142,6 +142,12 @@ public:
System(System&&) = delete; System(System&&) = delete;
System& operator=(System&&) = delete; System& operator=(System&&) = delete;
/**
* Initializes the system
* This function will initialize core functionality used for system emulation
*/
void Initialize();
/** /**
* Run the OS and Application * Run the OS and Application
* This function will start emulation and run the relevant devices * This function will start emulation and run the relevant devices
@ -166,8 +172,8 @@ public:
void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size); void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
/// Shutdown the emulated system. /// Shutdown the main emulated process.
void Shutdown(); void ShutdownMainProcess();
/// Check if the core is shutting down. /// Check if the core is shutting down.
[[nodiscard]] bool IsShuttingDown() const; [[nodiscard]] bool IsShuttingDown() const;

View File

@ -40,7 +40,9 @@ struct CoreTiming::Event {
CoreTiming::CoreTiming() CoreTiming::CoreTiming()
: clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {} : clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {}
CoreTiming::~CoreTiming() = default; CoreTiming::~CoreTiming() {
Reset();
}
void CoreTiming::ThreadEntry(CoreTiming& instance) { void CoreTiming::ThreadEntry(CoreTiming& instance) {
constexpr char name[] = "HostTiming"; constexpr char name[] = "HostTiming";
@ -53,6 +55,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
} }
void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) { void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
Reset();
on_thread_init = std::move(on_thread_init_); on_thread_init = std::move(on_thread_init_);
event_fifo_id = 0; event_fifo_id = 0;
shutting_down = false; shutting_down = false;
@ -65,17 +68,8 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
} }
} }
void CoreTiming::Shutdown() { void CoreTiming::ClearPendingEvents() {
paused = true; event_queue.clear();
shutting_down = true;
pause_event.Set();
event.Set();
if (timer_thread) {
timer_thread->join();
}
ClearPendingEvents();
timer_thread.reset();
has_started = false;
} }
void CoreTiming::Pause(bool is_paused) { void CoreTiming::Pause(bool is_paused) {
@ -196,10 +190,6 @@ u64 CoreTiming::GetClockTicks() const {
return CpuCyclesToClockCycles(ticks); return CpuCyclesToClockCycles(ticks);
} }
void CoreTiming::ClearPendingEvents() {
event_queue.clear();
}
void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) { void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
std::scoped_lock lock{basic_lock}; std::scoped_lock lock{basic_lock};
@ -307,6 +297,18 @@ void CoreTiming::ThreadLoop() {
} }
} }
void CoreTiming::Reset() {
paused = true;
shutting_down = true;
pause_event.Set();
event.Set();
if (timer_thread) {
timer_thread->join();
}
timer_thread.reset();
has_started = false;
}
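
Reset() replaces the old Shutdown(): it signals the timer thread, joins it, and leaves the object ready for another Initialize(), which is why the destructor and Initialize() both call it. A self-contained sketch of that restartable-worker shape (hypothetical TimerHost, not the real CoreTiming):

    #include <atomic>
    #include <chrono>
    #include <functional>
    #include <memory>
    #include <thread>

    class TimerHost {
    public:
        ~TimerHost() { Reset(); } // mirrors CoreTiming::~CoreTiming calling Reset()

        void Initialize(std::function<void()> body) {
            Reset(); // always join a stale worker before starting a new one
            shutting_down = false;
            worker = std::make_unique<std::thread>([this, body = std::move(body)] {
                while (!shutting_down.load()) {
                    body();
                    std::this_thread::sleep_for(std::chrono::milliseconds(1));
                }
            });
        }

        void Reset() {
            shutting_down = true;
            if (worker && worker->joinable()) {
                worker->join();
            }
            worker.reset();
        }

    private:
        std::atomic<bool> shutting_down{true};
        std::unique_ptr<std::thread> worker;
    };
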
std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const { std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
if (is_multicore) { if (is_multicore) {
return clock->GetTimeNS(); return clock->GetTimeNS();

View File

@ -61,19 +61,14 @@ public:
/// required to end slice - 1 and start slice 0 before the first cycle of code is executed. /// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
void Initialize(std::function<void()>&& on_thread_init_); void Initialize(std::function<void()>&& on_thread_init_);
/// Tears down all timing related functionality. /// Clear all pending events. This should ONLY be done on exit.
void Shutdown(); void ClearPendingEvents();
/// Sets if emulation is multicore or single core, must be set before Initialize /// Sets if emulation is multicore or single core, must be set before Initialize
void SetMulticore(bool is_multicore_) { void SetMulticore(bool is_multicore_) {
is_multicore = is_multicore_; is_multicore = is_multicore_;
} }
/// Check if it's using host timing.
bool IsHostTiming() const {
return is_multicore;
}
/// Pauses/Unpauses the execution of the timer thread. /// Pauses/Unpauses the execution of the timer thread.
void Pause(bool is_paused); void Pause(bool is_paused);
@ -136,12 +131,11 @@ public:
private: private:
struct Event; struct Event;
/// Clear all pending events. This should ONLY be done on exit.
void ClearPendingEvents();
static void ThreadEntry(CoreTiming& instance); static void ThreadEntry(CoreTiming& instance);
void ThreadLoop(); void ThreadLoop();
void Reset();
std::unique_ptr<Common::WallClock> clock; std::unique_ptr<Common::WallClock> clock;
s64 global_timer = 0; s64 global_timer = 0;

View File

@ -31,12 +31,14 @@ public:
DramMemoryMap::Base; DramMemoryMap::Base;
} }
u8* GetPointer(PAddr addr) { template <typename T>
return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base); T* GetPointer(PAddr addr) {
return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
} }
const u8* GetPointer(PAddr addr) const { template <typename T>
return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base); const T* GetPointer(PAddr addr) const {
return reinterpret_cast<const T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
} }
Common::HostMemory buffer; Common::HostMemory buffer;
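
The two u8* overloads become a single pair of templated accessors, so callers name the pointee type at the call site instead of casting the result. A reduced sketch over a plain byte vector (hypothetical DramBase and backing store, not the real Common::HostMemory):

    #include <cstdint>
    #include <vector>

    constexpr std::uintptr_t DramBase = 0x8000'0000; // hypothetical base address

    struct DeviceMemorySketch {
        std::vector<std::uint8_t> backing = std::vector<std::uint8_t>(0x10000);

        template <typename T>
        T* GetPointer(std::uintptr_t addr) {
            return reinterpret_cast<T*>(backing.data() + (addr - DramBase));
        }

        template <typename T>
        const T* GetPointer(std::uintptr_t addr) const {
            return reinterpret_cast<const T*>(backing.data() + (addr - DramBase));
        }
    };

    // Usage: auto* word = mem.GetPointer<std::uint32_t>(DramBase + 0x10);
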

View File

@ -5,7 +5,6 @@
#include "common/assert.h" #include "common/assert.h"
#include "common/common_types.h" #include "common/common_types.h"
#include "common/logging/log.h" #include "common/logging/log.h"
#include "common/uuid.h"
#include "core/core.h" #include "core/core.h"
#include "core/file_sys/savedata_factory.h" #include "core/file_sys/savedata_factory.h"
#include "core/file_sys/vfs.h" #include "core/file_sys/vfs.h"
@ -60,36 +59,6 @@ bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataA
attr.title_id == 0 && attr.save_id == 0); attr.title_id == 0 && attr.save_id == 0);
} }
std::string GetFutureSaveDataPath(SaveDataSpaceId space_id, SaveDataType type, u64 title_id,
u128 user_id) {
// Only detect nand user saves.
const auto space_id_path = [space_id]() -> std::string_view {
switch (space_id) {
case SaveDataSpaceId::NandUser:
return "/user/save";
default:
return "";
}
}();
if (space_id_path.empty()) {
return "";
}
Common::UUID uuid;
std::memcpy(uuid.uuid.data(), user_id.data(), sizeof(Common::UUID));
// Only detect account/device saves from the future location.
switch (type) {
case SaveDataType::SaveData:
return fmt::format("{}/account/{}/{:016X}/1", space_id_path, uuid.RawString(), title_id);
case SaveDataType::DeviceSaveData:
return fmt::format("{}/device/{:016X}/1", space_id_path, title_id);
default:
return "";
}
}
} // Anonymous namespace } // Anonymous namespace
std::string SaveDataAttribute::DebugInfo() const { std::string SaveDataAttribute::DebugInfo() const {
@ -113,7 +82,7 @@ ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space,
PrintSaveDataAttributeWarnings(meta); PrintSaveDataAttributeWarnings(meta);
const auto save_directory = const auto save_directory =
GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id); GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
auto out = dir->CreateDirectoryRelative(save_directory); auto out = dir->CreateDirectoryRelative(save_directory);
@ -130,7 +99,7 @@ ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space,
const SaveDataAttribute& meta) const { const SaveDataAttribute& meta) const {
const auto save_directory = const auto save_directory =
GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id); GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
auto out = dir->GetDirectoryRelative(save_directory); auto out = dir->GetDirectoryRelative(save_directory);
@ -165,9 +134,9 @@ std::string SaveDataFactory::GetSaveDataSpaceIdPath(SaveDataSpaceId space) {
} }
} }
std::string SaveDataFactory::GetFullPath(Core::System& system, VirtualDir dir, std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId space,
SaveDataSpaceId space, SaveDataType type, u64 title_id, SaveDataType type, u64 title_id, u128 user_id,
u128 user_id, u64 save_id) { u64 save_id) {
// According to switchbrew, if a save is of type SaveData and the title id field is 0, it should // According to switchbrew, if a save is of type SaveData and the title id field is 0, it should
// be interpreted as the title id of the current process. // be interpreted as the title id of the current process.
if (type == SaveDataType::SaveData || type == SaveDataType::DeviceSaveData) { if (type == SaveDataType::SaveData || type == SaveDataType::DeviceSaveData) {
@ -176,17 +145,6 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, VirtualDir dir,
} }
} }
// For compat with a future impl.
if (std::string future_path =
GetFutureSaveDataPath(space, type, title_id & ~(0xFFULL), user_id);
!future_path.empty()) {
// Check if this location exists, and prefer it over the old.
if (const auto future_dir = dir->GetDirectoryRelative(future_path); future_dir != nullptr) {
LOG_INFO(Service_FS, "Using save at new location: {}", future_path);
return future_path;
}
}
std::string out = GetSaveDataSpaceIdPath(space); std::string out = GetSaveDataSpaceIdPath(space);
switch (type) { switch (type) {
@ -209,8 +167,7 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, VirtualDir dir,
SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id, SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,
u128 user_id) const { u128 user_id) const {
const auto path = const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
const auto relative_dir = GetOrCreateDirectoryRelative(dir, path); const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);
const auto size_file = relative_dir->GetFile(SAVE_DATA_SIZE_FILENAME); const auto size_file = relative_dir->GetFile(SAVE_DATA_SIZE_FILENAME);
@ -228,8 +185,7 @@ SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,
void SaveDataFactory::WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id, void SaveDataFactory::WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,
SaveDataSize new_value) const { SaveDataSize new_value) const {
const auto path = const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
const auto relative_dir = GetOrCreateDirectoryRelative(dir, path); const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);
const auto size_file = relative_dir->CreateFile(SAVE_DATA_SIZE_FILENAME); const auto size_file = relative_dir->CreateFile(SAVE_DATA_SIZE_FILENAME);
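
ReadSaveDataSize()/WriteSaveDataSize() now resolve their directory through the six-argument GetFullPath(). For reference, the path construction the removed future-location helper performed was plain fmt composition; a hedged sketch of an account-save path builder (hypothetical helper, not the factory's actual method):

    #include <cstdint>
    #include <string>
    #include <fmt/format.h>

    // Hypothetical helper: NAND user account-save path for a title, following
    // the format string visible in the removed GetFutureSaveDataPath above.
    std::string AccountSavePath(const std::string& uuid_raw, std::uint64_t title_id) {
        return fmt::format("/user/save/account/{}/{:016X}/1", uuid_raw, title_id);
    }

    // AccountSavePath("00112233445566778899aabbccddeeff", 0x0100000000010000)
    //   -> "/user/save/account/00112233445566778899aabbccddeeff/0100000000010000/1"
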

View File

@ -95,8 +95,8 @@ public:
VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const; VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const;
static std::string GetSaveDataSpaceIdPath(SaveDataSpaceId space); static std::string GetSaveDataSpaceIdPath(SaveDataSpaceId space);
static std::string GetFullPath(Core::System& system, VirtualDir dir, SaveDataSpaceId space, static std::string GetFullPath(Core::System& system, SaveDataSpaceId space, SaveDataType type,
SaveDataType type, u64 title_id, u128 user_id, u64 save_id); u64 title_id, u128 user_id, u64 save_id);
SaveDataSize ReadSaveDataSize(SaveDataType type, u64 title_id, u128 user_id) const; SaveDataSize ReadSaveDataSize(SaveDataType type, u64 title_id, u128 user_id) const;
void WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id, void WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,

View File

@ -94,8 +94,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
// TODO(bunnei): Fix this once we support the kernel virtual memory layout. // TODO(bunnei): Fix this once we support the kernel virtual memory layout.
if (size > 0) { if (size > 0) {
void* backing_kernel_memory{ void* backing_kernel_memory{system.DeviceMemory().GetPointer<void>(
system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))}; TranslateSlabAddrToPhysical(memory_layout, start))};
const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1); const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
ASSERT(region != nullptr); ASSERT(region != nullptr);
@ -181,7 +181,7 @@ void InitializeKPageBufferSlabHeap(Core::System& system) {
ASSERT(slab_address != 0); ASSERT(slab_address != 0);
// Initialize the slabheap. // Initialize the slabheap.
KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address), KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
slab_size); slab_size);
} }

View File

@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
// Clear the memory. // Clear the memory.
for (const auto& block : m_page_group.Nodes()) { for (const auto& block : m_page_group.Nodes()) {
std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize()); std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
} }
// Set remaining tracking members. // Set remaining tracking members.

View File

@ -20,18 +20,6 @@ public:
}; };
static_assert(sizeof(PageBuffer) == PageSize); static_assert(sizeof(PageBuffer) == PageSize);
private:
KSpinLock m_lock;
KPageBitmap m_page_bitmap;
size_t m_used{};
size_t m_peak{};
size_t m_count{};
VAddr m_address{};
size_t m_size{};
// TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
std::vector<u8> m_backing_memory;
public: public:
KDynamicPageManager() = default; KDynamicPageManager() = default;
@ -80,19 +68,19 @@ public:
R_SUCCEED(); R_SUCCEED();
} }
constexpr VAddr GetAddress() const { VAddr GetAddress() const {
return m_address; return m_address;
} }
constexpr size_t GetSize() const { size_t GetSize() const {
return m_size; return m_size;
} }
constexpr size_t GetUsed() const { size_t GetUsed() const {
return m_used; return m_used;
} }
constexpr size_t GetPeak() const { size_t GetPeak() const {
return m_peak; return m_peak;
} }
constexpr size_t GetCount() const { size_t GetCount() const {
return m_count; return m_count;
} }
@ -131,6 +119,18 @@ public:
// Decrement our used count. // Decrement our used count.
--m_used; --m_used;
} }
private:
KSpinLock m_lock;
KPageBitmap m_page_bitmap;
size_t m_used{};
size_t m_peak{};
size_t m_count{};
VAddr m_address{};
size_t m_size{};
// TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
std::vector<u8> m_backing_memory;
}; };
} // namespace Kernel } // namespace Kernel

View File

@ -17,10 +17,6 @@ class KDynamicResourceManager {
public: public:
using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>; using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>;
private:
KDynamicPageManager* m_page_allocator{};
DynamicSlabType* m_slab_heap{};
public: public:
constexpr KDynamicResourceManager() = default; constexpr KDynamicResourceManager() = default;
@ -49,6 +45,10 @@ public:
void Free(T* t) const { void Free(T* t) const {
m_slab_heap->Free(t); m_slab_heap->Free(t);
} }
private:
KDynamicPageManager* m_page_allocator{};
DynamicSlabType* m_slab_heap{};
}; };
class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {}; class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {};

View File

@ -16,16 +16,6 @@ class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
YUZU_NON_COPYABLE(KDynamicSlabHeap); YUZU_NON_COPYABLE(KDynamicSlabHeap);
YUZU_NON_MOVEABLE(KDynamicSlabHeap); YUZU_NON_MOVEABLE(KDynamicSlabHeap);
private:
using PageBuffer = KDynamicPageManager::PageBuffer;
private:
std::atomic<size_t> m_used{};
std::atomic<size_t> m_peak{};
std::atomic<size_t> m_count{};
VAddr m_address{};
size_t m_size{};
public: public:
constexpr KDynamicSlabHeap() = default; constexpr KDynamicSlabHeap() = default;
@ -117,6 +107,16 @@ public:
KSlabHeapImpl::Free(t); KSlabHeapImpl::Free(t);
--m_used; --m_used;
} }
private:
using PageBuffer = KDynamicPageManager::PageBuffer;
private:
std::atomic<size_t> m_used{};
std::atomic<size_t> m_peak{};
std::atomic<size_t> m_count{};
VAddr m_address{};
size_t m_size{};
}; };
} // namespace Kernel } // namespace Kernel

View File

@ -11,16 +11,12 @@
namespace Kernel::KInterruptManager { namespace Kernel::KInterruptManager {
void HandleInterrupt(KernelCore& kernel, s32 core_id) { void HandleInterrupt(KernelCore& kernel, s32 core_id) {
auto* process = kernel.CurrentProcess();
if (!process) {
return;
}
// Acknowledge the interrupt. // Acknowledge the interrupt.
kernel.PhysicalCore(core_id).ClearInterrupt(); kernel.PhysicalCore(core_id).ClearInterrupt();
auto& current_thread = GetCurrentThread(kernel); auto& current_thread = GetCurrentThread(kernel);
if (auto* process = kernel.CurrentProcess(); process) {
// If the user disable count is set, we may need to pin the current thread. // If the user disable count is set, we may need to pin the current thread.
if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) { if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{kernel};
@ -31,9 +27,18 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
// Set the interrupt flag for the thread. // Set the interrupt flag for the thread.
GetCurrentThread(kernel).SetInterruptFlag(); GetCurrentThread(kernel).SetInterruptFlag();
} }
}
// Request interrupt scheduling. // Request interrupt scheduling.
kernel.CurrentScheduler()->RequestScheduleOnInterrupt(); kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
} }
void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) {
for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
if (core_mask & (1ULL << core_id)) {
kernel.PhysicalCore(core_id).Interrupt();
}
}
}
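
SendInterProcessorInterrupt() treats core_mask as one bit per CPU core. A standalone sketch of the same mask walk, with printf standing in for PhysicalCore::Interrupt():

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr std::size_t num_cpu_cores = 4; // Core::Hardware::NUM_CPU_CORES on Switch
        constexpr std::uint64_t core_mask = (1ULL << 0) | (1ULL << 2); // cores 0 and 2

        for (std::size_t core_id = 0; core_id < num_cpu_cores; ++core_id) {
            if (core_mask & (1ULL << core_id)) {
                std::printf("interrupt core %zu\n", core_id);
            }
        }
    }
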
} // namespace Kernel::KInterruptManager } // namespace Kernel::KInterruptManager

View File

@ -11,6 +11,8 @@ class KernelCore;
namespace KInterruptManager { namespace KInterruptManager {
void HandleInterrupt(KernelCore& kernel, s32 core_id); void HandleInterrupt(KernelCore& kernel, s32 core_id);
} void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask);
} // namespace KInterruptManager
} // namespace Kernel } // namespace Kernel

View File

@ -6,6 +6,7 @@
#include "common/alignment.h" #include "common/alignment.h"
#include "common/assert.h" #include "common/assert.h"
#include "common/common_types.h" #include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "core/hle/kernel/memory_types.h" #include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_types.h" #include "core/hle/kernel/svc_types.h"
@ -168,9 +169,8 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per
enum class KMemoryAttribute : u8 { enum class KMemoryAttribute : u8 {
None = 0x00, None = 0x00,
Mask = 0x7F, All = 0xFF,
All = Mask, UserMask = All,
DontCareMask = 0x80,
Locked = static_cast<u8>(Svc::MemoryAttribute::Locked), Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked), IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
@ -178,76 +178,112 @@ enum class KMemoryAttribute : u8 {
Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached), Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
SetMask = Uncached, SetMask = Uncached,
IpcAndDeviceMapped = IpcLocked | DeviceShared,
LockedAndIpcLocked = Locked | IpcLocked,
DeviceSharedAndUncached = DeviceShared | Uncached
}; };
DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute); DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
static_assert((static_cast<u8>(KMemoryAttribute::Mask) & enum class KMemoryBlockDisableMergeAttribute : u8 {
static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0); None = 0,
Normal = (1u << 0),
DeviceLeft = (1u << 1),
IpcLeft = (1u << 2),
Locked = (1u << 3),
DeviceRight = (1u << 4),
AllLeft = Normal | DeviceLeft | IpcLeft | Locked,
AllRight = DeviceRight,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute);
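
DECLARE_ENUM_FLAG_OPERATORS gives the scoped enum the usual bitwise operators. Roughly what it expands to, sketched for a trimmed-down enum (assumed shape; the real macro lives in yuzu's common headers):

    #include <cstdint>

    enum class Attr : std::uint8_t {
        None = 0,
        Normal = 1u << 0,
        DeviceLeft = 1u << 1,
        AllLeft = Normal | DeviceLeft, // fixed underlying type lets enumerators combine
    };

    constexpr Attr operator|(Attr a, Attr b) {
        return static_cast<Attr>(static_cast<std::uint8_t>(a) | static_cast<std::uint8_t>(b));
    }
    constexpr Attr operator&(Attr a, Attr b) {
        return static_cast<Attr>(static_cast<std::uint8_t>(a) & static_cast<std::uint8_t>(b));
    }
    constexpr Attr operator~(Attr a) {
        return static_cast<Attr>(static_cast<std::uint8_t>(~static_cast<std::uint8_t>(a)));
    }

    // Usage: merging is allowed only when no blocking bit is set, e.g.
    //   (attr & Attr::AllLeft) == Attr::None
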
struct KMemoryInfo { struct KMemoryInfo {
VAddr addr{}; uintptr_t m_address;
std::size_t size{}; size_t m_size;
KMemoryState state{}; KMemoryState m_state;
KMemoryPermission perm{}; u16 m_device_disable_merge_left_count;
KMemoryAttribute attribute{}; u16 m_device_disable_merge_right_count;
KMemoryPermission original_perm{}; u16 m_ipc_lock_count;
u16 ipc_lock_count{}; u16 m_device_use_count;
u16 device_use_count{}; u16 m_ipc_disable_merge_count;
KMemoryPermission m_permission;
KMemoryAttribute m_attribute;
KMemoryPermission m_original_permission;
KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
constexpr Svc::MemoryInfo GetSvcMemoryInfo() const { constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
return { return {
addr, .addr = m_address,
size, .size = m_size,
static_cast<Svc::MemoryState>(state & KMemoryState::Mask), .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask),
static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask), .attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask),
static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask), .perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask),
ipc_lock_count, .ipc_refcount = m_ipc_lock_count,
device_use_count, .device_refcount = m_device_use_count,
.padding = {},
}; };
} }
constexpr VAddr GetAddress() const { constexpr uintptr_t GetAddress() const {
return addr; return m_address;
} }
constexpr std::size_t GetSize() const {
return size; constexpr size_t GetSize() const {
return m_size;
} }
constexpr std::size_t GetNumPages() const {
return GetSize() / PageSize; constexpr size_t GetNumPages() const {
return this->GetSize() / PageSize;
} }
constexpr VAddr GetEndAddress() const {
return GetAddress() + GetSize(); constexpr uintptr_t GetEndAddress() const {
return this->GetAddress() + this->GetSize();
} }
constexpr VAddr GetLastAddress() const {
return GetEndAddress() - 1; constexpr uintptr_t GetLastAddress() const {
return this->GetEndAddress() - 1;
} }
constexpr u16 GetIpcLockCount() const {
return m_ipc_lock_count;
}
constexpr u16 GetIpcDisableMergeCount() const {
return m_ipc_disable_merge_count;
}
constexpr KMemoryState GetState() const { constexpr KMemoryState GetState() const {
return state; return m_state;
}
constexpr KMemoryAttribute GetAttribute() const {
return attribute;
} }
constexpr KMemoryPermission GetPermission() const { constexpr KMemoryPermission GetPermission() const {
return perm; return m_permission;
}
constexpr KMemoryPermission GetOriginalPermission() const {
return m_original_permission;
}
constexpr KMemoryAttribute GetAttribute() const {
return m_attribute;
}
constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
return m_disable_merge_attribute;
} }
}; };
class KMemoryBlock final { class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
friend class KMemoryBlockManager;
private: private:
VAddr addr{}; u16 m_device_disable_merge_left_count;
std::size_t num_pages{}; u16 m_device_disable_merge_right_count;
KMemoryState state{KMemoryState::None}; VAddr m_address;
u16 ipc_lock_count{}; size_t m_num_pages;
u16 device_use_count{}; KMemoryState m_memory_state;
KMemoryPermission perm{KMemoryPermission::None}; u16 m_ipc_lock_count;
KMemoryPermission original_perm{KMemoryPermission::None}; u16 m_device_use_count;
KMemoryAttribute attribute{KMemoryAttribute::None}; u16 m_ipc_disable_merge_count;
KMemoryPermission m_permission;
KMemoryPermission m_original_permission;
KMemoryAttribute m_attribute;
KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
public: public:
static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) { static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) {
@ -261,113 +297,349 @@ public:
} }
public: public:
constexpr KMemoryBlock() = default;
constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_,
KMemoryPermission perm_, KMemoryAttribute attribute_)
: addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}
constexpr VAddr GetAddress() const { constexpr VAddr GetAddress() const {
return addr; return m_address;
} }
constexpr std::size_t GetNumPages() const { constexpr size_t GetNumPages() const {
return num_pages; return m_num_pages;
} }
constexpr std::size_t GetSize() const { constexpr size_t GetSize() const {
return GetNumPages() * PageSize; return this->GetNumPages() * PageSize;
} }
constexpr VAddr GetEndAddress() const { constexpr VAddr GetEndAddress() const {
return GetAddress() + GetSize(); return this->GetAddress() + this->GetSize();
} }
constexpr VAddr GetLastAddress() const { constexpr VAddr GetLastAddress() const {
return GetEndAddress() - 1; return this->GetEndAddress() - 1;
}
constexpr u16 GetIpcLockCount() const {
return m_ipc_lock_count;
}
constexpr u16 GetIpcDisableMergeCount() const {
return m_ipc_disable_merge_count;
}
constexpr KMemoryPermission GetPermission() const {
return m_permission;
}
constexpr KMemoryPermission GetOriginalPermission() const {
return m_original_permission;
}
constexpr KMemoryAttribute GetAttribute() const {
return m_attribute;
} }
constexpr KMemoryInfo GetMemoryInfo() const { constexpr KMemoryInfo GetMemoryInfo() const {
return { return {
GetAddress(), GetSize(), state, perm, .m_address = this->GetAddress(),
attribute, original_perm, ipc_lock_count, device_use_count, .m_size = this->GetSize(),
.m_state = m_memory_state,
.m_device_disable_merge_left_count = m_device_disable_merge_left_count,
.m_device_disable_merge_right_count = m_device_disable_merge_right_count,
.m_ipc_lock_count = m_ipc_lock_count,
.m_device_use_count = m_device_use_count,
.m_ipc_disable_merge_count = m_ipc_disable_merge_count,
.m_permission = m_permission,
.m_attribute = m_attribute,
.m_original_permission = m_original_permission,
.m_disable_merge_attribute = m_disable_merge_attribute,
}; };
} }
void ShareToDevice(KMemoryPermission /*new_perm*/) { public:
ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || explicit KMemoryBlock() = default;
device_use_count == 0);
attribute |= KMemoryAttribute::DeviceShared; constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
const u16 new_use_count{++device_use_count}; KMemoryAttribute attr)
ASSERT(new_use_count > 0); : Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(),
m_device_disable_merge_left_count(), m_device_disable_merge_right_count(),
m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p),
m_original_permission(KMemoryPermission::None), m_attribute(attr),
m_disable_merge_attribute() {}
constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
KMemoryAttribute attr) {
m_device_disable_merge_left_count = 0;
m_device_disable_merge_right_count = 0;
m_address = addr;
m_num_pages = np;
m_memory_state = ms;
m_ipc_lock_count = 0;
m_device_use_count = 0;
m_permission = p;
m_original_permission = KMemoryPermission::None;
m_attribute = attr;
m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None;
} }
void UnshareToDevice(KMemoryPermission /*new_perm*/) {
ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
const u16 prev_use_count{device_use_count--};
ASSERT(prev_use_count > 0);
if (prev_use_count == 1) {
attribute &= ~KMemoryAttribute::DeviceShared;
}
}
private:
constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const { constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask | constexpr auto AttributeIgnoreMask =
KMemoryAttribute::IpcLocked | KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
KMemoryAttribute::DeviceShared}; return m_memory_state == s && m_permission == p &&
return state == s && perm == p && (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
(attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
} }
constexpr bool HasSameProperties(const KMemoryBlock& rhs) const { constexpr bool HasSameProperties(const KMemoryBlock& rhs) const {
return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm && return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission &&
attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count && m_original_permission == rhs.m_original_permission &&
device_use_count == rhs.device_use_count; m_attribute == rhs.m_attribute && m_ipc_lock_count == rhs.m_ipc_lock_count &&
m_device_use_count == rhs.m_device_use_count;
} }
constexpr bool Contains(VAddr start) const { constexpr bool CanMergeWith(const KMemoryBlock& rhs) const {
return GetAddress() <= start && start <= GetEndAddress(); return this->HasSameProperties(rhs) &&
(m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) ==
KMemoryBlockDisableMergeAttribute::None &&
(rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) ==
KMemoryBlockDisableMergeAttribute::None;
} }
constexpr void Add(std::size_t count) { constexpr bool Contains(VAddr addr) const {
ASSERT(count > 0); return this->GetAddress() <= addr && addr <= this->GetEndAddress();
ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1);
num_pages += count;
} }
constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm, constexpr void Add(const KMemoryBlock& added_block) {
KMemoryAttribute new_attribute) { ASSERT(added_block.GetNumPages() > 0);
ASSERT(original_perm == KMemoryPermission::None); ASSERT(this->GetAddress() + added_block.GetSize() - 1 <
ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None); this->GetEndAddress() + added_block.GetSize() - 1);
state = new_state; m_num_pages += added_block.GetNumPages();
perm = new_perm; m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | added_block.m_disable_merge_attribute);
attribute = static_cast<KMemoryAttribute>( m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count;
new_attribute |
(attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
} }
constexpr KMemoryBlock Split(VAddr split_addr) { constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a,
ASSERT(GetAddress() < split_addr); bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) {
ASSERT(Contains(split_addr)); ASSERT(m_original_permission == KMemoryPermission::None);
ASSERT(Common::IsAligned(split_addr, PageSize)); ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
KMemoryBlock block; m_memory_state = s;
block.addr = addr; m_permission = p;
block.num_pages = (split_addr - GetAddress()) / PageSize; m_attribute = static_cast<KMemoryAttribute>(
block.state = state; a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
block.ipc_lock_count = ipc_lock_count;
block.device_use_count = device_use_count;
block.perm = perm;
block.original_perm = original_perm;
block.attribute = attribute;
addr = split_addr; if (set_disable_merge_attr && set_mask != 0) {
num_pages -= block.num_pages; m_disable_merge_attribute = m_disable_merge_attribute |
static_cast<KMemoryBlockDisableMergeAttribute>(set_mask);
}
if (clear_mask != 0) {
m_disable_merge_attribute = m_disable_merge_attribute &
static_cast<KMemoryBlockDisableMergeAttribute>(~clear_mask);
}
}
return block; constexpr void Split(KMemoryBlock* block, VAddr addr) {
ASSERT(this->GetAddress() < addr);
ASSERT(this->Contains(addr));
ASSERT(Common::IsAligned(addr, PageSize));
block->m_address = m_address;
block->m_num_pages = (addr - this->GetAddress()) / PageSize;
block->m_memory_state = m_memory_state;
block->m_ipc_lock_count = m_ipc_lock_count;
block->m_device_use_count = m_device_use_count;
block->m_permission = m_permission;
block->m_original_permission = m_original_permission;
block->m_attribute = m_attribute;
block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft);
block->m_ipc_disable_merge_count = m_ipc_disable_merge_count;
block->m_device_disable_merge_left_count = m_device_disable_merge_left_count;
block->m_device_disable_merge_right_count = 0;
m_address = addr;
m_num_pages -= block->m_num_pages;
m_ipc_disable_merge_count = 0;
m_device_disable_merge_left_count = 0;
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
}
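
Split() moves the pages below addr into *block and keeps the tail in *this, partitioning the disable-merge state so left-edge bits travel with the left half. The address math in isolation (toy model, not the real KMemoryBlock):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t PageSize = 0x1000;

    struct Block {
        std::uintptr_t address;
        std::size_t num_pages;
    };

    // Carve [b.address, addr) into the returned block; b keeps [addr, end).
    Block Split(Block& b, std::uintptr_t addr) {
        Block left{b.address, (addr - b.address) / PageSize};
        b.address = addr;
        b.num_pages -= left.num_pages;
        return left;
    }

    int main() {
        Block b{0x10000, 8}; // covers [0x10000, 0x18000)
        const Block left = Split(b, 0x13000);
        assert(left.address == 0x10000 && left.num_pages == 3);
        assert(b.address == 0x13000 && b.num_pages == 5);
    }
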
constexpr void UpdateDeviceDisableMergeStateForShareLeft(
[[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
if (left) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft);
const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count;
ASSERT(new_device_disable_merge_left_count > 0);
}
}
constexpr void UpdateDeviceDisableMergeStateForShareRight(
[[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
if (right) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight);
const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count;
ASSERT(new_device_disable_merge_right_count > 0);
}
}
constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left,
bool right) {
this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right);
this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
}
constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
// We must either be shared or have a zero lock count.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
m_device_use_count == 0);
// Share.
const u16 new_count = ++m_device_use_count;
ASSERT(new_count > 0);
m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::DeviceShared);
this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
}
constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
[[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
if (left) {
if (!m_device_disable_merge_left_count) {
return;
}
--m_device_disable_merge_left_count;
}
m_device_disable_merge_left_count =
std::min(m_device_disable_merge_left_count, m_device_use_count);
if (m_device_disable_merge_left_count == 0) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft);
}
}
constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
[[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
if (right) {
const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
ASSERT(old_device_disable_merge_right_count > 0);
if (old_device_disable_merge_right_count == 1) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight);
}
}
}
constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left,
bool right) {
this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right);
this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
}
constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
// We must be shared.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
// Unshare.
const u16 old_count = m_device_use_count--;
ASSERT(old_count > 0);
if (old_count == 1) {
m_attribute =
static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
}
this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
}
constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
// We must be shared.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
// Unshare.
const u16 old_count = m_device_use_count--;
ASSERT(old_count > 0);
if (old_count == 1) {
m_attribute =
static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
}
this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
}
constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
// We must either be locked or have a zero lock count.
ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
m_ipc_lock_count == 0);
// Lock.
const u16 new_lock_count = ++m_ipc_lock_count;
ASSERT(new_lock_count > 0);
// If this is our first lock, update our permissions.
if (new_lock_count == 1) {
ASSERT(m_original_permission == KMemoryPermission::None);
ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) ==
(m_permission | KMemoryPermission::NotMapped));
ASSERT((m_permission & KMemoryPermission::UserExecute) !=
KMemoryPermission::UserExecute ||
(new_perm == KMemoryPermission::UserRead));
m_original_permission = m_permission;
m_permission = static_cast<KMemoryPermission>(
(new_perm & KMemoryPermission::IpcLockChangeMask) |
(m_original_permission & ~KMemoryPermission::IpcLockChangeMask));
}
m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::IpcLocked);
if (left) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft);
const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count;
ASSERT(new_ipc_disable_merge_count > 0);
}
}
constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
[[maybe_unused]] bool right) {
// We must be locked.
ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked);
// Unlock.
const u16 old_lock_count = m_ipc_lock_count--;
ASSERT(old_lock_count > 0);
// If this is our last unlock, update our permissions.
if (old_lock_count == 1) {
ASSERT(m_original_permission != KMemoryPermission::None);
m_permission = m_original_permission;
m_original_permission = KMemoryPermission::None;
m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::IpcLocked);
}
if (left) {
const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--;
ASSERT(old_ipc_disable_merge_count > 0);
if (old_ipc_disable_merge_count == 1) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft);
}
}
}
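
LockForIpc()/UnlockForIpc() form a counted lock: the first lock stashes the current permission and applies the restricted one, and only the last unlock restores it. The save/restore skeleton in isolation (simplified types, not the kernel's):

    #include <cassert>
    #include <cstdint>

    struct BlockState {
        std::uint16_t ipc_lock_count = 0;
        std::uint8_t permission = 0x3;        // e.g. user read/write
        std::uint8_t original_permission = 0; // meaningful only while locked

        void LockForIpc(std::uint8_t new_perm) {
            if (++ipc_lock_count == 1) {
                original_permission = permission; // first lock: stash
                permission = new_perm;            // and restrict
            }
        }

        void UnlockForIpc() {
            if (ipc_lock_count-- == 1) {
                permission = original_permission; // last unlock: restore
                original_permission = 0;
            }
        }
    };

    int main() {
        BlockState b;
        b.LockForIpc(0x1); // read-only while IPC-locked
        b.LockForIpc(0x1); // nested lock only bumps the count
        b.UnlockForIpc();
        assert(b.permission == 0x1); // still locked once
        b.UnlockForIpc();
        assert(b.permission == 0x3); // fully unlocked: original restored
    }
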
constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
return m_disable_merge_attribute;
} }
}; };
static_assert(std::is_trivially_destructible<KMemoryBlock>::value); static_assert(std::is_trivially_destructible<KMemoryBlock>::value);

View File

@ -2,221 +2,336 @@
// SPDX-License-Identifier: GPL-2.0-or-later // SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_memory_block_manager.h" #include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/memory_types.h"
namespace Kernel { namespace Kernel {
KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_) KMemoryBlockManager::KMemoryBlockManager() = default;
: start_addr{start_addr_}, end_addr{end_addr_} {
const u64 num_pages{(end_addr - start_addr) / PageSize}; Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free, // Allocate a block to encapsulate the address space, insert it into the tree.
KMemoryPermission::None, KMemoryAttribute::None); KMemoryBlock* start_block = slab_manager->Allocate();
R_UNLESS(start_block != nullptr, ResultOutOfResource);
// Set our start and end.
m_start_address = st;
m_end_address = nd;
ASSERT(Common::IsAligned(m_start_address, PageSize));
ASSERT(Common::IsAligned(m_end_address, PageSize));
// Initialize and insert the block.
start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None);
m_memory_block_tree.insert(*start_block);
R_SUCCEED();
} }
KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) { void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
auto node{memory_block_tree.begin()}; HostUnmapCallback&& host_unmap_callback) {
while (node != end()) { // Erase every block until we have none left.
const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()}; auto it = m_memory_block_tree.begin();
if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) { while (it != m_memory_block_tree.end()) {
return node; KMemoryBlock* block = std::addressof(*it);
} it = m_memory_block_tree.erase(it);
node = std::next(node); slab_manager->Free(block);
} host_unmap_callback(block->GetAddress(), block->GetSize());
return end();
} }
VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages, ASSERT(m_memory_block_tree.empty());
std::size_t num_pages, std::size_t align,
std::size_t offset, std::size_t guard_pages) {
if (num_pages == 0) {
return {};
} }
const VAddr region_end{region_start + region_num_pages * PageSize}; VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
const VAddr region_last{region_end - 1}; size_t num_pages, size_t alignment, size_t offset,
for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) { size_t guard_pages) const {
const auto info{it->GetMemoryInfo()}; if (num_pages > 0) {
const VAddr region_end = region_start + region_num_pages * PageSize;
const VAddr region_last = region_end - 1;
for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
it++) {
const KMemoryInfo info = it->GetMemoryInfo();
if (region_last < info.GetAddress()) { if (region_last < info.GetAddress()) {
break; break;
} }
if (info.m_state != KMemoryState::Free) {
if (info.state != KMemoryState::Free) {
continue; continue;
} }
VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()}; VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
area += guard_pages * PageSize; area += guard_pages * PageSize;
const VAddr offset_area{Common::AlignDown(area, align) + offset}; const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
area = (area <= offset_area) ? offset_area : offset_area + align; area = (area <= offset_area) ? offset_area : offset_area + alignment;
const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize}; const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
const VAddr area_last{area_end - 1}; const VAddr area_last = area_end - 1;
if (info.GetAddress() <= area && area < area_last && area_last <= region_last && if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
area_last <= info.GetLastAddress()) { area_last <= info.GetLastAddress()) {
return area; return area;
} }
} }
}
return {}; return {};
} }
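
The candidate computation in FindFreeArea() aligns the guarded start down to the requested alignment, adds the offset, and bumps by one alignment unit if that result landed below the start. One worked instance of that arithmetic (hypothetical numbers):

    #include <cassert>
    #include <cstdint>

    constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
        return value - (value % align);
    }

    int main() {
        constexpr std::uint64_t PageSize = 0x1000;
        const std::uint64_t guard_pages = 1;
        const std::uint64_t alignment = 0x4000;
        const std::uint64_t offset = 0x1000;

        std::uint64_t area = 0x10000 + guard_pages * PageSize; // guarded start: 0x11000
        const std::uint64_t offset_area = AlignDown(area, alignment) + offset; // 0x11000
        area = (area <= offset_area) ? offset_area : offset_area + alignment;

        assert(area == 0x11000); // already satisfies start, alignment, and offset
    }
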
void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state, void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, VAddr address, size_t num_pages) {
// Find the iterator now that we've updated.
iterator it = this->FindIterator(address);
if (address != m_start_address) {
it--;
}
// Coalesce blocks that we can.
while (true) {
iterator prev = it++;
if (it == m_memory_block_tree.end()) {
break;
}
if (prev->CanMergeWith(*it)) {
KMemoryBlock* block = std::addressof(*it);
m_memory_block_tree.erase(it);
prev->Add(*block);
allocator->Free(block);
it = prev;
}
if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
break;
}
}
}
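
CoalesceForUpdate() re-walks the updated range and folds each mergeable right neighbour into the block on its left, freeing the absorbed node. The merge loop in miniature (std::list stands in for the intrusive red-black tree, an int for the property tuple):

    #include <cassert>
    #include <list>

    struct Blk {
        int start;
        int pages;
        int props; // stand-in for state/permission/attribute
        bool CanMergeWith(const Blk& r) const { return props == r.props; }
    };

    void Coalesce(std::list<Blk>& blocks) {
        auto it = blocks.begin();
        while (it != blocks.end()) {
            auto prev = it++;
            if (it == blocks.end()) {
                break;
            }
            if (prev->CanMergeWith(*it)) {
                prev->pages += it->pages;  // absorb the right neighbour
                it = blocks.erase(it);     // drop it from the container
                it = prev;                 // re-test prev against its new right neighbour
            }
        }
    }

    int main() {
        std::list<Blk> blocks{{0, 4, 1}, {4, 4, 1}, {8, 4, 2}};
        Coalesce(blocks);
        assert(blocks.size() == 2 && blocks.front().pages == 8);
    }
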
void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
size_t num_pages, KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attr,
KMemoryBlockDisableMergeAttribute set_disable_attr,
KMemoryBlockDisableMergeAttribute clear_disable_attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(address, PageSize));
ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
KMemoryAttribute::None);
VAddr cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if (it->HasProperties(state, perm, attr)) {
// If we already have the right properties, just advance.
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages =
(cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
}
} else {
// If we need to, create a new block before and insert it.
if (cur_info.GetAddress() != cur_address) {
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
}
// If we need to, create a new block after and insert it.
if (cur_info.GetSize() > remaining_size) {
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
}
// Update block state.
it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
static_cast<u8>(clear_disable_attr));
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
}
it++;
}
this->CoalesceForUpdate(allocator, address, num_pages);
}
void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
VAddr address, size_t num_pages, KMemoryState test_state,
KMemoryPermission test_perm, KMemoryAttribute test_attr,
                                        KMemoryState state, KMemoryPermission perm,
                                        KMemoryAttribute attr) {
    // Ensure for auditing that we never end up with an invalid tree.
    KScopedMemoryBlockManagerAuditor auditor(this);
    ASSERT(Common::IsAligned(address, PageSize));
    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
           KMemoryAttribute::None);

    VAddr cur_address = address;
    size_t remaining_pages = num_pages;
    iterator it = this->FindIterator(address);

    while (remaining_pages > 0) {
        const size_t remaining_size = remaining_pages * PageSize;
        KMemoryInfo cur_info = it->GetMemoryInfo();
        if (it->HasProperties(test_state, test_perm, test_attr) &&
            !it->HasProperties(state, perm, attr)) {
            // If we need to, create a new block before and insert it.
            if (cur_info.GetAddress() != cur_address) {
                KMemoryBlock* new_block = allocator->Allocate();

                it->Split(new_block, cur_address);
                it = m_memory_block_tree.insert(*new_block);
                it++;

                cur_info = it->GetMemoryInfo();
                cur_address = cur_info.GetAddress();
            }

            // If we need to, create a new block after and insert it.
            if (cur_info.GetSize() > remaining_size) {
                KMemoryBlock* new_block = allocator->Allocate();

                it->Split(new_block, cur_address + remaining_size);
                it = m_memory_block_tree.insert(*new_block);

                cur_info = it->GetMemoryInfo();
            }

            // Update block state.
            it->Update(state, perm, attr, false, 0, 0);
            cur_address += cur_info.GetSize();
            remaining_pages -= cur_info.GetNumPages();
        } else {
            // If we already have the right properties, just advance.
            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
                remaining_pages = 0;
                cur_address += remaining_size;
            } else {
                remaining_pages =
                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
                cur_address = cur_info.GetEndAddress();
            }
        }

        it++;
    }

    this->CoalesceForUpdate(allocator, address, num_pages);
}

void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
                                     size_t num_pages, MemoryBlockLockFunction lock_func,
                                     KMemoryPermission perm) {
    // Ensure for auditing that we never end up with an invalid tree.
    KScopedMemoryBlockManagerAuditor auditor(this);
    ASSERT(Common::IsAligned(address, PageSize));

    VAddr cur_address = address;
    size_t remaining_pages = num_pages;
    iterator it = this->FindIterator(address);

    const VAddr end_address = address + (num_pages * PageSize);

    while (remaining_pages > 0) {
        const size_t remaining_size = remaining_pages * PageSize;
        KMemoryInfo cur_info = it->GetMemoryInfo();

        // If we need to, create a new block before and insert it.
        if (cur_info.m_address != cur_address) {
            KMemoryBlock* new_block = allocator->Allocate();

            it->Split(new_block, cur_address);
            it = m_memory_block_tree.insert(*new_block);
            it++;

            cur_info = it->GetMemoryInfo();
            cur_address = cur_info.GetAddress();
        }

        if (cur_info.GetSize() > remaining_size) {
            // If we need to, create a new block after and insert it.
            KMemoryBlock* new_block = allocator->Allocate();

            it->Split(new_block, cur_address + remaining_size);
            it = m_memory_block_tree.insert(*new_block);

            cur_info = it->GetMemoryInfo();
        }

        // Call the locked update function.
        (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address,
                                          cur_info.GetEndAddress() == end_address);
        cur_address += cur_info.GetSize();
        remaining_pages -= cur_info.GetNumPages();
        it++;
    }

    this->CoalesceForUpdate(allocator, address, num_pages);
}

// Debug.
bool KMemoryBlockManager::CheckState() const {
    // Loop over every block, ensuring that we are sorted and coalesced.
    auto it = m_memory_block_tree.cbegin();
    auto prev = it++;
    while (it != m_memory_block_tree.cend()) {
        const KMemoryInfo prev_info = prev->GetMemoryInfo();
        const KMemoryInfo cur_info = it->GetMemoryInfo();

        // Sequential blocks which can be merged should be merged.
        if (prev->CanMergeWith(*it)) {
            return false;
        }

        // Sequential blocks should be sequential.
        if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
            return false;
        }

        // If the block is ipc locked, it must have a count.
        if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
            cur_info.m_ipc_lock_count == 0) {
            return false;
        }

        // If the block is device shared, it must have a count.
        if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
            cur_info.m_device_use_count == 0) {
            return false;
        }

        // Advance the iterator.
        prev = it++;
    }

    // Our loop will miss checking the last block, potentially, so check it.
    if (prev != m_memory_block_tree.cend()) {
        const KMemoryInfo prev_info = prev->GetMemoryInfo();

        // If the block is ipc locked, it must have a count.
        if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
            prev_info.m_ipc_lock_count == 0) {
            return false;
        }

        // If the block is device shared, it must have a count.
        if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
            prev_info.m_device_use_count == 0) {
            return false;
        }
    }

    return true;
}

} // namespace Kernel
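
UpdateLock receives lock_func as a C++ pointer-to-member function, which is why its call site needs the ->* operator. A self-contained sketch of that mechanism with placeholder types (Block, LockFunction, and ApplyLock are illustrative names, not kernel code):

struct Block {
    void ShareToDevice(int perm, bool left, bool right) {}
    void UnshareToDevice(int perm, bool left, bool right) {}
};

// Same shape as MemoryBlockLockFunction: a pointer to a member function of Block.
using LockFunction = void (Block::*)(int perm, bool left, bool right);

void ApplyLock(Block* block, LockFunction lock_func) {
    // Mirrors the (std::addressof(*it)->*lock_func)(...) call in UpdateLock above.
    (block->*lock_func)(0, true, false);
}

// e.g. ApplyLock(&block, &Block::ShareToDevice);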

View File

@ -4,63 +4,154 @@

#pragma once

#include <functional>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_memory_block.h"

namespace Kernel {

class KMemoryBlockManagerUpdateAllocator {
public:
    static constexpr size_t MaxBlocks = 2;

private:
    KMemoryBlock* m_blocks[MaxBlocks];
    size_t m_index;
    KMemoryBlockSlabManager* m_slab_manager;

private:
    Result Initialize(size_t num_blocks) {
        // Check num blocks.
        ASSERT(num_blocks <= MaxBlocks);

        // Set index.
        m_index = MaxBlocks - num_blocks;

        // Allocate the blocks.
        for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) {
            m_blocks[m_index + i] = m_slab_manager->Allocate();
            R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource);
        }

        R_SUCCEED();
    }

public:
    KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm,
                                       size_t num_blocks = MaxBlocks)
        : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) {
        *out_result = this->Initialize(num_blocks);
    }

    ~KMemoryBlockManagerUpdateAllocator() {
        for (const auto& block : m_blocks) {
            if (block != nullptr) {
                m_slab_manager->Free(block);
            }
        }
    }

    KMemoryBlock* Allocate() {
        ASSERT(m_index < MaxBlocks);
        ASSERT(m_blocks[m_index] != nullptr);
        KMemoryBlock* block = nullptr;
        std::swap(block, m_blocks[m_index++]);
        return block;
    }

    void Free(KMemoryBlock* block) {
        ASSERT(m_index <= MaxBlocks);
        ASSERT(block != nullptr);
        if (m_index == 0) {
            m_slab_manager->Free(block);
        } else {
            m_blocks[--m_index] = block;
        }
    }
};

class KMemoryBlockManager final {
public:
    using MemoryBlockTree =
        Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
    using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left,
                                                           bool right);
    using iterator = MemoryBlockTree::iterator;
    using const_iterator = MemoryBlockTree::const_iterator;

public:
    KMemoryBlockManager();

    using HostUnmapCallback = std::function<void(VAddr, u64)>;

    Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
    void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);

    iterator end() {
        return m_memory_block_tree.end();
    }
    const_iterator end() const {
        return m_memory_block_tree.end();
    }
    const_iterator cend() const {
        return m_memory_block_tree.cend();
    }

    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
                       size_t alignment, size_t offset, size_t guard_pages) const;

    void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
                KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
                KMemoryBlockDisableMergeAttribute set_disable_attr,
                KMemoryBlockDisableMergeAttribute clear_disable_attr);
    void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
                    MemoryBlockLockFunction lock_func, KMemoryPermission perm);

    void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
                       size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
                       KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
                       KMemoryAttribute attr);

    iterator FindIterator(VAddr address) const {
        return m_memory_block_tree.find(KMemoryBlock(
            address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
    }

    const KMemoryBlock* FindBlock(VAddr address) const {
        if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
            return std::addressof(*it);
        }

        return nullptr;
    }

    // Debug.
    bool CheckState() const;

private:
    void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
                           size_t num_pages);

    MemoryBlockTree m_memory_block_tree;
    VAddr m_start_address{};
    VAddr m_end_address{};
};

class KScopedMemoryBlockManagerAuditor {
public:
    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {
        ASSERT(m_manager->CheckState());
    }
    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m)
        : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {}
    ~KScopedMemoryBlockManagerAuditor() {
        ASSERT(m_manager->CheckState());
    }

private:
    KMemoryBlockManager* m_manager;
};

} // namespace Kernel
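
KScopedMemoryBlockManagerAuditor makes every mutating method self-checking: the tree invariant is asserted on entry and again on exit of the guarded scope. The RAII idiom in isolation, as a rough sketch with placeholder types:

#include <cassert>

class Tree {
public:
    bool CheckState() const { return true; } // invariant: sorted, coalesced, counts valid
};

class ScopedAuditor {
public:
    explicit ScopedAuditor(Tree* t) : m_tree(t) { assert(m_tree->CheckState()); }
    ~ScopedAuditor() { assert(m_tree->CheckState()); }

private:
    Tree* m_tree;
};

void Mutate(Tree& tree) {
    ScopedAuditor auditor(&tree); // audit on entry...
    // ... mutate the tree ...
}                                 // ...and automatically again on exit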

View File

@ -331,7 +331,7 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag
    // Set all the allocated memory.
    for (const auto& block : out->Nodes()) {
        std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                    block.GetSize());
    }

View File

@ -12,7 +12,7 @@ namespace Kernel {
KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
    ASSERT(Common::IsAligned(phys_addr, PageSize));
    return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
}

} // namespace Kernel
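
Both call sites in this commit move the cast into DeviceMemory itself: a templated GetPointer<T> replaces a reinterpret_cast at every caller. A plausible shape for such an accessor, as a sketch only; the real DeviceMemory bookkeeping differs in detail:

class DeviceMemory {
public:
    template <typename T>
    T* GetPointer(PAddr addr) {
        // Centralizes the single reinterpret_cast instead of repeating it per caller.
        return reinterpret_cast<T*>(buffer_base + (addr - base_addr));
    }

private:
    u8* buffer_base{};
    PAddr base_addr{};
};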

File diff suppressed because it is too large

View File

@ -9,8 +9,10 @@
#include "common/common_types.h"
#include "common/page_table.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/result.h"
@ -34,58 +36,66 @@ public:
    ~KPageTable();

    Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
                                VAddr code_addr, size_t code_size,
                                KMemoryBlockSlabManager* mem_block_slab_manager,
                                KMemoryManager::Pool pool);

    void Finalize();

    Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
                          KMemoryPermission perm);
    Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
    Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
                           ICacheInvalidationStrategy icache_invalidation_strategy);
    Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
                              VAddr src_addr);
    Result MapPhysicalMemory(VAddr addr, size_t size);
    Result UnmapPhysicalMemory(VAddr addr, size_t size);
    Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
    Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
    Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
                    KMemoryPermission perm);
    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
                    KMemoryState state, KMemoryPermission perm) {
        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
                                this->GetRegionAddress(state),
                                this->GetRegionSize(state) / PageSize, state, perm));
    }
    Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
    Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
    Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
    KMemoryInfo QueryInfo(VAddr addr);
    Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
    Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
    Result SetMaxHeapSize(size_t size);
    Result SetHeapSize(VAddr* out, size_t size);
    ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
                                          VAddr region_start, size_t region_num_pages,
                                          KMemoryState state, KMemoryPermission perm,
                                          PAddr map_addr = 0);

    Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
                                        bool is_aligned);
    Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size);

    Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);

    Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
    Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
    Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
                                KMemoryState state_mask, KMemoryState state,
                                KMemoryPermission perm_mask, KMemoryPermission perm,
                                KMemoryAttribute attr_mask, KMemoryAttribute attr);

    Common::PageTable& PageTableImpl() {
        return *m_page_table_impl;
    }

    const Common::PageTable& PageTableImpl() const {
        return *m_page_table_impl;
    }

    bool CanContain(VAddr addr, size_t size, KMemoryState state) const;

private:
    enum class OperationType : u32 {
@ -96,67 +106,65 @@ private:
        ChangePermissionsAndRefresh,
    };
    static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
        KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;

    Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
                    bool is_pa_valid, VAddr region_start, size_t region_num_pages,
                    KMemoryState state, KMemoryPermission perm);
    Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
    bool IsRegionContiguous(VAddr addr, u64 size) const;
    void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
    KMemoryInfo QueryInfoImpl(VAddr addr);
    VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
                                size_t align);
    Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
                   OperationType operation);
    Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
                   PAddr map_addr = 0);
    VAddr GetRegionAddress(KMemoryState state) const;
    size_t GetRegionSize(KMemoryState state) const;

    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
                       size_t alignment, size_t offset, size_t guard_pages);

    Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
                                      KMemoryState state_mask, KMemoryState state,
                                      KMemoryPermission perm_mask, KMemoryPermission perm,
                                      KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
    Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask,
                                      KMemoryState state, KMemoryPermission perm_mask,
                                      KMemoryPermission perm, KMemoryAttribute attr_mask,
                                      KMemoryAttribute attr) const {
        R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
                                                  perm, attr_mask, attr));
    }

    Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
                            KMemoryPermission perm_mask, KMemoryPermission perm,
                            KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
    Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
                            KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr,
                            size_t size, KMemoryState state_mask, KMemoryState state,
                            KMemoryPermission perm_mask, KMemoryPermission perm,
                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
    Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
                            KMemoryState state_mask, KMemoryState state,
                            KMemoryPermission perm_mask, KMemoryPermission perm,
                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
        R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
                                  state_mask, state, perm_mask, perm, attr_mask, attr,
                                  ignore_attr));
    }
    Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
                            KMemoryPermission perm_mask, KMemoryPermission perm,
                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
        R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
                                        attr_mask, attr, ignore_attr));
    }

    Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
@ -174,13 +182,13 @@ private:
    bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);

    bool IsLockedByCurrentThread() const {
        return m_general_lock.IsLockedByCurrentThread();
    }

    bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
        ASSERT(this->IsLockedByCurrentThread());

        return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
    }

    bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
@ -191,95 +199,93 @@ private:
        return *out != 0;
    }
    mutable KLightLock m_general_lock;
    mutable KLightLock m_map_physical_memory_lock;

public:
    constexpr VAddr GetAddressSpaceStart() const {
        return m_address_space_start;
    }
    constexpr VAddr GetAddressSpaceEnd() const {
        return m_address_space_end;
    }
    constexpr size_t GetAddressSpaceSize() const {
        return m_address_space_end - m_address_space_start;
    }
    constexpr VAddr GetHeapRegionStart() const {
        return m_heap_region_start;
    }
    constexpr VAddr GetHeapRegionEnd() const {
        return m_heap_region_end;
    }
    constexpr size_t GetHeapRegionSize() const {
        return m_heap_region_end - m_heap_region_start;
    }
    constexpr VAddr GetAliasRegionStart() const {
        return m_alias_region_start;
    }
    constexpr VAddr GetAliasRegionEnd() const {
        return m_alias_region_end;
    }
    constexpr size_t GetAliasRegionSize() const {
        return m_alias_region_end - m_alias_region_start;
    }
    constexpr VAddr GetStackRegionStart() const {
        return m_stack_region_start;
    }
    constexpr VAddr GetStackRegionEnd() const {
        return m_stack_region_end;
    }
    constexpr size_t GetStackRegionSize() const {
        return m_stack_region_end - m_stack_region_start;
    }
    constexpr VAddr GetKernelMapRegionStart() const {
        return m_kernel_map_region_start;
    }
    constexpr VAddr GetKernelMapRegionEnd() const {
        return m_kernel_map_region_end;
    }
    constexpr VAddr GetCodeRegionStart() const {
        return m_code_region_start;
    }
    constexpr VAddr GetCodeRegionEnd() const {
        return m_code_region_end;
    }
    constexpr VAddr GetAliasCodeRegionStart() const {
        return m_alias_code_region_start;
    }
    constexpr VAddr GetAliasCodeRegionSize() const {
        return m_alias_code_region_end - m_alias_code_region_start;
    }
    size_t GetNormalMemorySize() {
        KScopedLightLock lk(m_general_lock);
        return GetHeapSize() + m_mapped_physical_memory_size;
    }
    constexpr size_t GetAddressSpaceWidth() const {
        return m_address_space_width;
    }
    constexpr size_t GetHeapSize() const {
        return m_current_heap_end - m_heap_region_start;
    }
    constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const {
        return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1;
    }
    constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const {
        return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1;
    }
    constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const {
        return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1;
    }
    constexpr bool IsInvalidRegion(VAddr address, size_t size) const {
        return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
    }
    constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const {
        return address + size > m_heap_region_start && m_heap_region_end > address;
    }
    constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const {
        return address + size > m_alias_region_start && m_alias_region_end > address;
    }
    constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const {
        if (IsInvalidRegion(address, size)) {
            return true;
        }
@ -291,73 +297,78 @@ public:
        }
        return {};
    }
    constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const {
        return !IsOutsideASLRRegion(address, size);
    }
    constexpr size_t GetNumGuardPages() const {
        return IsKernel() ? 1 : 4;
    }

    PAddr GetPhysicalAddr(VAddr addr) const {
        const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
        ASSERT(backing_addr);
        return backing_addr + addr;
    }

    constexpr bool Contains(VAddr addr) const {
        return m_address_space_start <= addr && addr <= m_address_space_end - 1;
    }

    constexpr bool Contains(VAddr addr, size_t size) const {
        return m_address_space_start <= addr && addr < addr + size &&
               addr + size - 1 <= m_address_space_end - 1;
    }

private:
    constexpr bool IsKernel() const {
        return m_is_kernel;
    }
    constexpr bool IsAslrEnabled() const {
        return m_enable_aslr;
    }

    constexpr bool ContainsPages(VAddr addr, size_t num_pages) const {
        return (m_address_space_start <= addr) &&
               (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
               (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
    }

private:
    VAddr m_address_space_start{};
    VAddr m_address_space_end{};
    VAddr m_heap_region_start{};
    VAddr m_heap_region_end{};
    VAddr m_current_heap_end{};
    VAddr m_alias_region_start{};
    VAddr m_alias_region_end{};
    VAddr m_stack_region_start{};
    VAddr m_stack_region_end{};
    VAddr m_kernel_map_region_start{};
    VAddr m_kernel_map_region_end{};
    VAddr m_code_region_start{};
    VAddr m_code_region_end{};
    VAddr m_alias_code_region_start{};
    VAddr m_alias_code_region_end{};

    size_t m_mapped_physical_memory_size{};
    size_t m_max_heap_size{};
    size_t m_max_physical_memory_size{};
    size_t m_address_space_width{};

    KMemoryBlockManager m_memory_block_manager;

    bool m_is_kernel{};
    bool m_enable_aslr{};
    bool m_enable_device_address_space_merge{};

    KMemoryBlockSlabManager* m_memory_block_slab_manager{};

    u32 m_heap_fill_value{};
    const KMemoryRegion* m_cached_physical_heap_region{};

    KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
    KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};

    std::unique_ptr<Common::PageTable> m_page_table_impl;

    Core::System& m_system;
};

} // namespace Kernel
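
GetNumGuardPages() above feeds FindFreeArea: a candidate mapping must leave that many guard pages of slack on both sides so adjacent mappings never touch. A deliberately simplified model of that search, as a sketch only (it assumes a hypothetical IsRangeFree helper and ignores the alignment and offset parameters the real function honors):

VAddr FindFreeAreaSimplified(VAddr region_start, VAddr region_end, size_t num_pages,
                             size_t guard_pages) {
    const size_t total_pages = num_pages + 2 * guard_pages;
    for (VAddr addr = region_start; addr + total_pages * PageSize <= region_end;
         addr += PageSize) {
        if (IsRangeFree(addr, total_pages)) {
            return addr + guard_pages * PageSize; // skip past the leading guard pages
        }
    }
    return 0; // no suitable gap found
}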

View File

@ -72,7 +72,8 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
    process->name = std::move(process_name);
    process->resource_limit = res_limit;
    process->system_resource_address = 0;
    process->state = State::Created;
    process->program_id = 0;
    process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
                                                              : kernel.CreateNewUserProcessID();
@ -92,11 +93,12 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
    process->exception_thread = nullptr;
    process->is_suspended = false;
    process->schedule_count = 0;
    process->is_handle_table_initialized = false;

    // Open a reference to the resource limit.
    process->resource_limit->Open();

    R_SUCCEED();
}

void KProcess::DoWorkerTaskImpl() {
@ -121,9 +123,9 @@ void KProcess::DecrementRunningThreadCount() {
    }
}
u64 KProcess::GetTotalPhysicalMemoryAvailable() {
    const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
                       page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
                       main_thread_stack_size};

    if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
        capacity != pool_size) {
@ -135,16 +137,16 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
    return memory_usage_capacity;
}

u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
    return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
}

u64 KProcess::GetTotalPhysicalMemoryUsed() {
    return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() +
           GetSystemResourceSize();
}

u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
    return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
}
@ -244,7 +246,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
    shmem->Open();
    shemen_info->Open();

    R_SUCCEED();
}

void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
@ -289,12 +291,12 @@ Result KProcess::Reset() {
    KScopedSchedulerLock sl{kernel};

    // Validate that we're in a state that we can reset.
    R_UNLESS(state != State::Terminated, ResultInvalidState);
    R_UNLESS(is_signaled, ResultInvalidState);

    // Clear signaled.
    is_signaled = false;
    R_SUCCEED();
}

Result KProcess::SetActivity(ProcessActivity activity) {
@ -304,15 +306,13 @@ Result KProcess::SetActivity(ProcessActivity activity) {
    KScopedSchedulerLock sl{kernel};

    // Validate our state.
    R_UNLESS(state != State::Terminating, ResultInvalidState);
    R_UNLESS(state != State::Terminated, ResultInvalidState);

    // Either pause or resume.
    if (activity == ProcessActivity::Paused) {
        // Verify that we're not suspended.
        R_UNLESS(!is_suspended, ResultInvalidState);

        // Suspend all threads.
        for (auto* thread : GetThreadList()) {
@ -325,9 +325,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
        ASSERT(activity == ProcessActivity::Runnable);

        // Verify that we're suspended.
        R_UNLESS(is_suspended, ResultInvalidState);

        // Resume all threads.
        for (auto* thread : GetThreadList()) {
@ -338,7 +336,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
        SetSuspended(false);
    }

    R_SUCCEED();
}

Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
@ -348,35 +346,38 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
    system_resource_size = metadata.GetSystemResourceSize();
    image_size = code_size;

    // We currently do not support process-specific system resource
    UNIMPLEMENTED_IF(system_resource_size != 0);

    KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
                                                  code_size + system_resource_size);
    if (!memory_reservation.Succeeded()) {
        LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
                  code_size + system_resource_size);
        R_RETURN(ResultLimitReached);
    }

    // Initialize process address space
    if (const Result result{page_table.InitializeForProcess(
            metadata.GetAddressSpaceType(), false, 0x8000000, code_size,
            &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)};
        result.IsError()) {
        R_RETURN(result);
    }

    // Map process code region
    if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(),
                                                      code_size / PageSize, KMemoryState::Code,
                                                      KMemoryPermission::None)};
        result.IsError()) {
        R_RETURN(result);
    }

    // Initialize process capabilities
    const auto& caps{metadata.GetKernelCapabilities()};
    if (const Result result{
            capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)};
        result.IsError()) {
        R_RETURN(result);
    }

    // Set memory usage capacity
@ -384,12 +385,12 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
    case FileSys::ProgramAddressSpaceType::Is32Bit:
    case FileSys::ProgramAddressSpaceType::Is36Bit:
    case FileSys::ProgramAddressSpaceType::Is39Bit:
        memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart();
        break;

    case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
        memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() +
                                page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart();
        break;

    default:
@ -397,10 +398,10 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
    }

    // Create TLS region
    R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address)));

    memory_reservation.Commit();

    R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize()));
}

void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
@ -409,15 +410,15 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
    resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);

    const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
    ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());

    ChangeState(State::Running);

    SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
}

void KProcess::PrepareForTermination() {
    ChangeState(State::Terminating);

    const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
        for (auto* thread : in_thread_list) {
@ -437,15 +438,15 @@ void KProcess::PrepareForTermination() {
    stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());

    this->DeleteThreadLocalRegion(plr_address);
    plr_address = 0;

    if (resource_limit) {
        resource_limit->Release(LimitableResource::PhysicalMemory,
                                main_thread_stack_size + image_size);
    }

    ChangeState(State::Terminated);
}

void KProcess::Finalize() {
@ -474,7 +475,7 @@ void KProcess::Finalize() {
    }

    // Finalize the page table.
    page_table.Finalize();

    // Perform inherited finalization.
    KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
@ -499,7 +500,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
            }

            *out = tlr;
            R_SUCCEED();
        }
    }
@ -528,7 +529,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
    // We succeeded!
    tlp_guard.Cancel();
    *out = tlr;
    R_SUCCEED();
}
Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
@ -576,7 +577,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
        KThreadLocalPage::Free(kernel, page_to_free);
    }

    R_SUCCEED();
}
bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
@ -628,7 +629,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
    const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
                                      Svc::MemoryPermission permission) {
        page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
    };

    kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
@ -645,19 +646,18 @@ bool KProcess::IsSignaled() const {
}

KProcess::KProcess(KernelCore& kernel_)
    : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
      handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
      state_lock{kernel_}, list_lock{kernel_} {}

KProcess::~KProcess() = default;

void KProcess::ChangeState(State new_state) {
    if (state == new_state) {
        return;
    }

    state = new_state;
    is_signaled = true;
    NotifyAvailable();
}
@ -668,17 +668,17 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
    // The kernel always ensures that the given stack size is page aligned.
    main_thread_stack_size = Common::AlignUp(stack_size, PageSize);

    const VAddr start{page_table.GetStackRegionStart()};
    const std::size_t size{page_table.GetStackRegionEnd() - start};

    CASCADE_RESULT(main_thread_stack_top,
                   page_table.AllocateAndMapMemory(
                       main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
                       KMemoryState::Stack, KMemoryPermission::UserReadWrite));

    main_thread_stack_top += main_thread_stack_size;

    R_SUCCEED();
}

} // namespace Kernel
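
This file consistently migrates return ResultSuccess; to R_SUCCEED() and return expr; to R_RETURN(expr). Simplified definitions in the spirit of these macros, as a sketch only; the real macros in yuzu also cooperate with scoped result guards:

// Assumes a Result type exposing IsError() and a ResultSuccess constant.
#define R_SUCCEED() return ResultSuccess

#define R_RETURN(expr) return (expr)

#define R_UNLESS(cond, result)                                                                     \
    do {                                                                                           \
        if (!(cond)) {                                                                             \
            return (result);                                                                       \
        }                                                                                          \
    } while (false)

#define R_TRY(expr)                                                                                \
    do {                                                                                           \
        if (const Result r_try_result = (expr); r_try_result.IsError()) {                          \
            return r_try_result;                                                                   \
        }                                                                                          \
    } while (false)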

View File

@ -13,6 +13,7 @@
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_worker_task.h"
@ -31,7 +32,6 @@ class ProgramMetadata;
namespace Kernel {

class KernelCore;
class KResourceLimit;
class KThread;
class KSharedMemoryInfo;
@ -45,24 +45,6 @@ enum class MemoryRegion : u16 {
    BASE = 3,
};
/**
* Indicates the status of a Process instance.
*
* @note These match the values as used by kernel,
* so new entries should only be added if RE
* shows that a new value has been introduced.
*/
enum class ProcessStatus {
Created,
CreatedWithDebuggerAttached,
Running,
WaitingForDebuggerToAttach,
DebuggerAttached,
Exiting,
Exited,
DebugBreak,
};
enum class ProcessActivity : u32 {
    Runnable,
    Paused,
@ -89,6 +71,17 @@ public:
    explicit KProcess(KernelCore& kernel_);
    ~KProcess() override;
enum class State {
Created = static_cast<u32>(Svc::ProcessState::Created),
CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached),
Running = static_cast<u32>(Svc::ProcessState::Running),
Crashed = static_cast<u32>(Svc::ProcessState::Crashed),
RunningAttached = static_cast<u32>(Svc::ProcessState::RunningAttached),
Terminating = static_cast<u32>(Svc::ProcessState::Terminating),
Terminated = static_cast<u32>(Svc::ProcessState::Terminated),
DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
};
    };

    enum : u64 {
        /// Lowest allowed process ID for a kernel initial process.
        InitialKIPIDMin = 1,
@ -114,12 +107,12 @@ public:
/// Gets a reference to the process' page table. /// Gets a reference to the process' page table.
KPageTable& PageTable() { KPageTable& PageTable() {
return *page_table; return page_table;
} }
/// Gets a const reference to the process' page table. /// Gets a const reference to the process' page table.
const KPageTable& PageTable() const { const KPageTable& PageTable() const {
return *page_table; return page_table;
} }
/// Gets a reference to the process' handle table. /// Gets a reference to the process' handle table.
@ -145,26 +138,25 @@ public:
} }
Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) { Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
return condition_var.Wait(address, cv_key, tag, ns); R_RETURN(condition_var.Wait(address, cv_key, tag, ns));
} }
Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) { Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
return address_arbiter.SignalToAddress(address, signal_type, value, count); R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count));
} }
Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value, Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
s64 timeout) { s64 timeout) {
return address_arbiter.WaitForAddress(address, arb_type, value, timeout); R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout));
} }
/// Gets the address to the process' dedicated TLS region.
VAddr GetTLSRegionAddress() const {
return tls_region_address;
}
VAddr GetProcessLocalRegionAddress() const {
return plr_address;
}
/// Gets the current status of the process /// Gets the current state of the process
ProcessStatus GetStatus() const { State GetState() const {
return status; return state;
} }
/// Gets the unique ID that identifies this particular process. /// Gets the unique ID that identifies this particular process.
@ -286,18 +278,18 @@ public:
} }
/// Retrieves the total physical memory available to this process in bytes. /// Retrieves the total physical memory available to this process in bytes.
u64 GetTotalPhysicalMemoryAvailable() const; u64 GetTotalPhysicalMemoryAvailable();
/// Retrieves the total physical memory available to this process in bytes, /// Retrieves the total physical memory available to this process in bytes,
/// without the size of the personal system resource heap added to it. /// without the size of the personal system resource heap added to it.
u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const; u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();
/// Retrieves the total physical memory used by this process in bytes. /// Retrieves the total physical memory used by this process in bytes.
u64 GetTotalPhysicalMemoryUsed() const; u64 GetTotalPhysicalMemoryUsed();
/// Retrieves the total physical memory used by this process in bytes, /// Retrieves the total physical memory used by this process in bytes,
/// without the size of the personal system resource heap added to it. /// without the size of the personal system resource heap added to it.
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const; u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();
/// Gets the list of all threads created with this process as their owner. /// Gets the list of all threads created with this process as their owner.
std::list<KThread*>& GetThreadList() { std::list<KThread*>& GetThreadList() {
@ -415,19 +407,24 @@ private:
pinned_threads[core_id] = nullptr; pinned_threads[core_id] = nullptr;
} }
/// Changes the process status. If the status is different
/// from the current process status, then this will trigger
/// a process signal.
void ChangeStatus(ProcessStatus new_status);
void FinalizeHandleTable() {
// Finalize the table.
handle_table.Finalize();
// Note that the table is finalized.
is_handle_table_initialized = false;
}
void ChangeState(State new_state);
/// Allocates the main thread stack for the process, given the stack size in bytes. /// Allocates the main thread stack for the process, given the stack size in bytes.
Result AllocateMainThreadStack(std::size_t stack_size); Result AllocateMainThreadStack(std::size_t stack_size);
/// Memory manager for this process /// Memory manager for this process
std::unique_ptr<KPageTable> page_table; KPageTable page_table;
/// Current status of the process /// Current state of the process
ProcessStatus status{}; State state{};
/// The ID of this process /// The ID of this process
u64 process_id = 0; u64 process_id = 0;
@ -443,6 +440,8 @@ private:
/// Resource limit descriptor for this process /// Resource limit descriptor for this process
KResourceLimit* resource_limit{}; KResourceLimit* resource_limit{};
VAddr system_resource_address{};
/// The ideal CPU core for this process, threads are scheduled on this core by default. /// The ideal CPU core for this process, threads are scheduled on this core by default.
u8 ideal_core = 0; u8 ideal_core = 0;
@ -469,7 +468,7 @@ private:
KConditionVariable condition_var; KConditionVariable condition_var;
/// Address indicating the location of the process' dedicated TLS region. /// Address indicating the location of the process' dedicated TLS region.
VAddr tls_region_address = 0; VAddr plr_address = 0;
/// Random values for svcGetInfo RandomEntropy /// Random values for svcGetInfo RandomEntropy
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{}; std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
@ -495,8 +494,12 @@ private:
/// Schedule count of this process /// Schedule count of this process
s64 schedule_count{}; s64 schedule_count{};
size_t memory_release_hint{};
bool is_signaled{}; bool is_signaled{};
bool is_suspended{}; bool is_suspended{};
bool is_immortal{};
bool is_handle_table_initialized{};
bool is_initialized{}; bool is_initialized{};
std::atomic<u16> num_running_threads{}; std::atomic<u16> num_running_threads{};
View File

@ -50,7 +50,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
is_initialized = true; is_initialized = true;
// Clear all pages in the memory. // Clear all pages in the memory.
std::memset(device_memory_.GetPointer(physical_address_), 0, size_); std::memset(device_memory_.GetPointer<void>(physical_address_), 0, size_);
return ResultSuccess; return ResultSuccess;
} }
View File

@ -54,7 +54,7 @@ public:
* @return A pointer to the shared memory block from the specified offset * @return A pointer to the shared memory block from the specified offset
*/ */
u8* GetPointer(std::size_t offset = 0) { u8* GetPointer(std::size_t offset = 0) {
return device_memory->GetPointer(physical_address + offset); return device_memory->GetPointer<u8>(physical_address + offset);
} }
/** /**
@ -63,7 +63,7 @@ public:
* @return A pointer to the shared memory block from the specified offset * @return A pointer to the shared memory block from the specified offset
*/ */
const u8* GetPointer(std::size_t offset = 0) const { const u8* GetPointer(std::size_t offset = 0) const {
return device_memory->GetPointer(physical_address + offset); return device_memory->GetPointer<u8>(physical_address + offset);
} }
void Finalize() override; void Finalize() override;
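These call sites switch from an untyped GetPointer overload to an explicit GetPointer&lt;T&gt;, which makes the pointee type visible at each use. A hedged sketch of the shape; the backing buffer and address translation below are placeholders, not the real DeviceMemory implementation:

```cpp
#include <cstdint>

class DeviceMemorySketch {
public:
    template <typename T>
    T* GetPointer(std::uint64_t phys_addr) {
        // Translate the physical address into an offset in the backing store.
        return reinterpret_cast<T*>(backing + (phys_addr - base));
    }

private:
    static constexpr std::uint64_t base = 0x1000;
    std::uint8_t backing[0x2000]{};
};

int main() {
    DeviceMemorySketch mem;
    // Callers now name the pointee type instead of relying on overloads.
    const std::uint8_t* bytes = mem.GetPointer<std::uint8_t>(0x1010);
    return bytes != nullptr ? 0 : 1;
}
```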
View File

@ -8,7 +8,6 @@
#include "common/assert.h" #include "common/assert.h"
#include "common/common_funcs.h" #include "common/common_funcs.h"
#include "common/common_types.h" #include "common/common_types.h"
#include "common/spin_lock.h"
namespace Kernel { namespace Kernel {
@ -37,34 +36,28 @@ public:
} }
void* Allocate() {
// KScopedInterruptDisable di;
m_lock.lock();
Node* ret = m_head;
if (ret != nullptr) [[likely]] {
m_head = ret->next;
}
m_lock.unlock();
return ret;
}
void Free(void* obj) {
// KScopedInterruptDisable di;
m_lock.lock();
Node* node = static_cast<Node*>(obj);
node->next = m_head;
m_head = node;
m_lock.unlock();
}
void* Allocate() {
Node* ret = m_head.load();
do {
if (ret == nullptr) {
break;
}
} while (!m_head.compare_exchange_weak(ret, ret->next));
return ret;
}
void Free(void* obj) {
Node* node = static_cast<Node*>(obj);
Node* cur_head = m_head.load();
do {
node->next = cur_head;
} while (!m_head.compare_exchange_weak(cur_head, node));
}
private: private:
std::atomic<Node*> m_head{}; std::atomic<Node*> m_head{};
Common::SpinLock m_lock;
}; };
} // namespace impl } // namespace impl
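The replacement drops the spin lock in favor of a classic Treiber stack: both paths read the head, prepare the new link, and retry with compare_exchange_weak until the head has not moved underneath them. A self-contained sketch of the pattern (simplified; the kernel's slab allocation discipline is what keeps ABA hazards at bay here):

```cpp
#include <atomic>
#include <cstdio>

struct Node {
    Node* next{};
};

class LockFreeFreeList {
public:
    void* Allocate() {
        Node* ret = m_head.load();
        do {
            if (ret == nullptr) {
                break; // list empty, nothing to hand out
            }
        } while (!m_head.compare_exchange_weak(ret, ret->next));
        return ret;
    }

    void Free(void* obj) {
        Node* node = static_cast<Node*>(obj);
        Node* cur_head = m_head.load();
        do {
            node->next = cur_head;
        } while (!m_head.compare_exchange_weak(cur_head, node));
    }

private:
    std::atomic<Node*> m_head{};
};

int main() {
    LockFreeFreeList list;
    Node a, b;
    list.Free(&a);
    list.Free(&b);
    std::printf("%p %p\n", list.Allocate(), list.Allocate()); // b then a (LIFO)
    return 0;
}
```

compare_exchange_weak may fail spuriously, which is harmless inside a retry loop and is typically cheaper than the strong form.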
View File

@ -30,6 +30,7 @@
#include "core/hle/kernel/k_worker_task_manager.h" #include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h" #include "core/hle/result.h"
#include "core/memory.h" #include "core/memory.h"
@ -38,6 +39,9 @@
#endif #endif
namespace { namespace {
constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1;
static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
u32 entry_point, u32 arg) { u32 entry_point, u32 arg) {
context = {}; context = {};
@ -241,7 +245,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
} }
} }
return ResultSuccess; R_SUCCEED();
} }
Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
@ -254,7 +258,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func)); thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
thread->is_single_core = !Settings::values.use_multi_core.GetValue(); thread->is_single_core = !Settings::values.use_multi_core.GetValue();
return ResultSuccess; R_SUCCEED();
} }
Result KThread::InitializeDummyThread(KThread* thread) { Result KThread::InitializeDummyThread(KThread* thread) {
@ -264,31 +268,32 @@ Result KThread::InitializeDummyThread(KThread* thread) {
// Initialize emulation parameters. // Initialize emulation parameters.
thread->stack_parameters.disable_count = 0; thread->stack_parameters.disable_count = 0;
return ResultSuccess; R_SUCCEED();
} }
Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) { Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
system.GetCpuManager().GetGuestActivateFunc()); ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc()));
} }
Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
system.GetCpuManager().GetIdleThreadStartFunc()); ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc()));
} }
Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread, Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
KThreadFunction func, uintptr_t arg, s32 virt_core) { KThreadFunction func, uintptr_t arg, s32 virt_core) {
return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority, R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr,
system.GetCpuManager().GetShutdownThreadStartFunc()); ThreadType::HighPriority,
system.GetCpuManager().GetShutdownThreadStartFunc()));
} }
Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func, Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core, uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
KProcess* owner) { KProcess* owner) {
system.Kernel().GlobalSchedulerContext().AddThread(thread); system.Kernel().GlobalSchedulerContext().AddThread(thread);
return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()); ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
} }
void KThread::PostDestroy(uintptr_t arg) { void KThread::PostDestroy(uintptr_t arg) {
@ -538,7 +543,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
*out_ideal_core = virtual_ideal_core_id; *out_ideal_core = virtual_ideal_core_id;
*out_affinity_mask = virtual_affinity_mask; *out_affinity_mask = virtual_affinity_mask;
return ResultSuccess; R_SUCCEED();
} }
Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
@ -554,7 +559,7 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask)
*out_affinity_mask = original_physical_affinity_mask.GetAffinityMask(); *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
} }
return ResultSuccess; R_SUCCEED();
} }
Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
@ -666,7 +671,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
} while (retry_update); } while (retry_update);
} }
return ResultSuccess; R_SUCCEED();
} }
void KThread::SetBasePriority(s32 value) { void KThread::SetBasePriority(s32 value) {
@ -839,7 +844,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
} while (thread_is_current); } while (thread_is_current);
} }
return ResultSuccess; R_SUCCEED();
} }
Result KThread::GetThreadContext3(std::vector<u8>& out) { Result KThread::GetThreadContext3(std::vector<u8>& out) {
@ -874,7 +879,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
} }
} }
return ResultSuccess; R_SUCCEED();
} }
void KThread::AddWaiterImpl(KThread* thread) { void KThread::AddWaiterImpl(KThread* thread) {
@ -1038,7 +1043,7 @@ Result KThread::Run() {
// Set our state and finish. // Set our state and finish.
SetState(ThreadState::Runnable); SetState(ThreadState::Runnable);
return ResultSuccess; R_SUCCEED();
} }
} }
@ -1073,6 +1078,78 @@ void KThread::Exit() {
UNREACHABLE_MSG("KThread::Exit() would return"); UNREACHABLE_MSG("KThread::Exit() would return");
} }
Result KThread::Terminate() {
ASSERT(this != GetCurrentThreadPointer(kernel));
// Request the thread terminate if it hasn't already.
if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
// If the thread isn't terminated, wait for it to terminate.
s32 index;
KSynchronizationObject* objects[] = {this};
R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1,
Svc::WaitInfinite));
}
R_SUCCEED();
}
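Terminate() above is two steps: request termination, and if the thread has not already reached Terminated, block on it until it does. The same shape in portable C++ terms, as an analogy only (std::thread::join stands in for the kernel's wait on the thread as a KSynchronizationObject):

```cpp
#include <atomic>
#include <thread>

std::atomic<bool> stop_requested{false};

void Worker() {
    while (!stop_requested.load(std::memory_order_relaxed)) {
        std::this_thread::yield();
    }
}

int main() {
    std::thread worker{Worker};
    stop_requested.store(true); // request the thread terminate
    worker.join();              // wait until it has terminated
    return 0;
}
```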
ThreadState KThread::RequestTerminate() {
ASSERT(this != GetCurrentThreadPointer(kernel));
KScopedSchedulerLock sl{kernel};
// Determine if this is the first termination request.
const bool first_request = [&]() -> bool {
// Perform an atomic compare-and-swap from false to true.
bool expected = false;
return termination_requested.compare_exchange_strong(expected, true);
}();
// If this is the first request, start termination procedure.
if (first_request) {
// If the thread is in initialized state, just change state to terminated.
if (this->GetState() == ThreadState::Initialized) {
thread_state = ThreadState::Terminated;
return ThreadState::Terminated;
}
// Register the terminating dpc.
this->RegisterDpc(DpcFlag::Terminating);
// If the thread is pinned, unpin it.
if (this->GetStackParameters().is_pinned) {
this->GetOwnerProcess()->UnpinThread(this);
}
// If the thread is suspended, continue it.
if (this->IsSuspended()) {
suspend_allowed_flags = 0;
this->UpdateState();
}
// Change the thread's priority to be higher than any system thread's.
if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) {
this->SetBasePriority(TerminatingThreadPriority);
}
// If the thread is runnable, send a termination interrupt to other cores.
if (this->GetState() == ThreadState::Runnable) {
if (const u64 core_mask =
physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel));
core_mask != 0) {
Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask);
}
}
// Wake up the thread.
if (this->GetState() == ThreadState::Waiting) {
wait_queue->CancelWait(this, ResultTerminationRequested, true);
}
}
return this->GetState();
}
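RequestTerminate() gates its one-time work (registering the terminating DPC, unpinning, resuming a suspended thread, boosting priority, IPI-ing other cores, cancelling waits) behind an atomic compare-and-swap, so repeated requests are harmless. The gate in isolation, as a minimal sketch:

```cpp
#include <atomic>
#include <cstdio>

std::atomic<bool> termination_requested{false};

bool IsFirstTerminationRequest() {
    bool expected = false;
    // Only the caller that swaps false -> true performs the set-up.
    return termination_requested.compare_exchange_strong(expected, true);
}

int main() {
    if (IsFirstTerminationRequest()) {
        std::puts("first request: register DPC, unpin, boost priority");
    }
    if (!IsFirstTerminationRequest()) {
        std::puts("repeat request: set-up already done, just report state");
    }
    return 0;
}
```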
Result KThread::Sleep(s64 timeout) { Result KThread::Sleep(s64 timeout) {
ASSERT(!kernel.GlobalSchedulerContext().IsLocked()); ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
ASSERT(this == GetCurrentThreadPointer(kernel)); ASSERT(this == GetCurrentThreadPointer(kernel));
@ -1086,7 +1163,7 @@ Result KThread::Sleep(s64 timeout) {
// Check if the thread should terminate. // Check if the thread should terminate.
if (this->IsTerminationRequested()) { if (this->IsTerminationRequested()) {
slp.CancelSleep(); slp.CancelSleep();
return ResultTerminationRequested; R_THROW(ResultTerminationRequested);
} }
// Wait for the sleep to end. // Wait for the sleep to end.
@ -1094,7 +1171,7 @@ Result KThread::Sleep(s64 timeout) {
SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
} }
return ResultSuccess; R_SUCCEED();
} }
void KThread::IfDummyThreadTryWait() { void KThread::IfDummyThreadTryWait() {
View File

@ -180,6 +180,10 @@ public:
void Exit(); void Exit();
Result Terminate();
ThreadState RequestTerminate();
[[nodiscard]] u32 GetSuspendFlags() const { [[nodiscard]] u32 GetSuspendFlags() const {
return suspend_allowed_flags & suspend_request_flags; return suspend_allowed_flags & suspend_request_flags;
} }
View File

@ -24,6 +24,7 @@
#include "core/hardware_properties.h" #include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h" #include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_client_port.h" #include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h" #include "core/hle/kernel/k_memory_manager.h"
@ -73,8 +74,16 @@ struct KernelCore::Impl {
InitializeMemoryLayout(); InitializeMemoryLayout();
Init::InitializeKPageBufferSlabHeap(system); Init::InitializeKPageBufferSlabHeap(system);
InitializeShutdownThreads(); InitializeShutdownThreads();
InitializePreemption(kernel);
InitializePhysicalCores(); InitializePhysicalCores();
InitializePreemption(kernel);
// Initialize the Dynamic Slab Heaps.
{
const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion();
ASSERT(pt_heap_region.GetEndAddress() != 0);
InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
}
RegisterHostThread(); RegisterHostThread();
} }
@ -86,6 +95,15 @@ struct KernelCore::Impl {
} }
} }
void CloseCurrentProcess() {
(*current_process).Finalize();
// current_process->Close();
// TODO: The current process should be destroyed based on accurate ref counting after
// calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
(*current_process).Destroy();
current_process = nullptr;
}
void Shutdown() { void Shutdown() {
is_shutting_down.store(true, std::memory_order_relaxed); is_shutting_down.store(true, std::memory_order_relaxed);
SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); }); SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
@ -99,10 +117,6 @@ struct KernelCore::Impl {
next_user_process_id = KProcess::ProcessIDMin; next_user_process_id = KProcess::ProcessIDMin;
next_thread_id = 1; next_thread_id = 1;
for (auto& core : cores) {
core = nullptr;
}
global_handle_table->Finalize(); global_handle_table->Finalize();
global_handle_table.reset(); global_handle_table.reset();
@ -152,15 +166,7 @@ struct KernelCore::Impl {
} }
} }
// Shutdown all processes. CloseCurrentProcess();
if (current_process) {
(*current_process).Finalize();
// current_process->Close();
// TODO: The current process should be destroyed based on accurate ref counting after
// calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
(*current_process).Destroy();
current_process = nullptr;
}
// Track kernel objects that were not freed on shutdown // Track kernel objects that were not freed on shutdown
{ {
@ -257,6 +263,18 @@ struct KernelCore::Impl {
system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
} }
void InitializeResourceManagers(VAddr address, size_t size) {
dynamic_page_manager = std::make_unique<KDynamicPageManager>();
memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();
dynamic_page_manager->Initialize(address, size);
static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
memory_block_heap->Initialize(dynamic_page_manager.get(),
ApplicationMemoryBlockSlabHeapSize);
app_memory_block_manager->Initialize(nullptr, memory_block_heap.get());
}
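The managers initialized above are layered: the page manager owns the page-table heap region, the slab heap carves its pages into fixed-size memory-block entries (20000 for the application manager), and the slab manager fronts the heap for one consumer. A much-simplified sketch of the wiring; all three types below are stand-ins, not the real KDynamic* classes:

```cpp
#include <cstddef>
#include <memory>
#include <vector>

struct PageSource {
    void Initialize(std::size_t num_pages) { pages = num_pages; }
    std::size_t pages{};
};

struct SlabHeap {
    void Initialize(PageSource* source, std::size_t num_blocks) {
        backing = source;
        blocks.resize(num_blocks);
    }
    PageSource* backing{};
    std::vector<int> blocks;
};

struct SlabManager {
    void Initialize(SlabHeap* heap_) { heap = heap_; }
    SlabHeap* heap{};
};

int main() {
    auto source = std::make_unique<PageSource>();
    auto heap = std::make_unique<SlabHeap>();
    auto manager = std::make_unique<SlabManager>();

    source->Initialize(64);
    heap->Initialize(source.get(), 20000); // mirrors ApplicationMemoryBlockSlabHeapSize
    manager->Initialize(heap.get());
    return manager->heap == heap.get() ? 0 : 1;
}
```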
void InitializeShutdownThreads() { void InitializeShutdownThreads() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
shutdown_threads[core_id] = KThread::Create(system.Kernel()); shutdown_threads[core_id] = KThread::Create(system.Kernel());
@ -344,11 +362,6 @@ struct KernelCore::Impl {
static inline thread_local KThread* current_thread{nullptr}; static inline thread_local KThread* current_thread{nullptr};
KThread* GetCurrentEmuThread() { KThread* GetCurrentEmuThread() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (IsShuttingDown()) {
return {};
}
const auto thread_id = GetCurrentHostThreadID(); const auto thread_id = GetCurrentHostThreadID();
if (thread_id >= Core::Hardware::NUM_CPU_CORES) { if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
return GetHostDummyThread(); return GetHostDummyThread();
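With the shutdown early-out removed, the lookup reduces to: consult this host thread's thread_local slot, and fall back to a dummy thread for host threads that never registered. A sketch of that pattern (names illustrative):

```cpp
#include <cstdio>
#include <thread>

struct EmuThread {
    const char* name;
};

static EmuThread dummy_thread{"dummy"};
static thread_local EmuThread* current_thread = nullptr;

EmuThread* GetCurrentEmuThread() {
    // Unregistered host threads get the dummy thread.
    return current_thread != nullptr ? current_thread : &dummy_thread;
}

int main() {
    EmuThread main_thread{"main"};
    current_thread = &main_thread;
    std::printf("%s\n", GetCurrentEmuThread()->name); // "main"

    std::thread other([] { std::printf("%s\n", GetCurrentEmuThread()->name); });
    other.join(); // prints "dummy": the slot is per host thread
    return 0;
}
```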
@ -770,6 +783,11 @@ struct KernelCore::Impl {
// Kernel memory management // Kernel memory management
std::unique_ptr<KMemoryManager> memory_manager; std::unique_ptr<KMemoryManager> memory_manager;
// Dynamic slab managers
std::unique_ptr<KDynamicPageManager> dynamic_page_manager;
std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap;
std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager;
// Shared memory for services // Shared memory for services
Kernel::KSharedMemory* hid_shared_mem{}; Kernel::KSharedMemory* hid_shared_mem{};
Kernel::KSharedMemory* font_shared_mem{}; Kernel::KSharedMemory* font_shared_mem{};
@ -853,6 +871,10 @@ const KProcess* KernelCore::CurrentProcess() const {
return impl->current_process; return impl->current_process;
} }
void KernelCore::CloseCurrentProcess() {
impl->CloseCurrentProcess();
}
const std::vector<KProcess*>& KernelCore::GetProcessList() const { const std::vector<KProcess*>& KernelCore::GetProcessList() const {
return impl->process_list; return impl->process_list;
} }
@ -1041,6 +1063,14 @@ const KMemoryManager& KernelCore::MemoryManager() const {
return *impl->memory_manager; return *impl->memory_manager;
} }
KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() {
return *impl->app_memory_block_manager;
}
const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const {
return *impl->app_memory_block_manager;
}
Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
return *impl->hid_shared_mem; return *impl->hid_shared_mem;
} }
View File

@ -37,6 +37,7 @@ class KClientSession;
class KEvent; class KEvent;
class KHandleTable; class KHandleTable;
class KLinkedListNode; class KLinkedListNode;
class KMemoryBlockSlabManager;
class KMemoryLayout; class KMemoryLayout;
class KMemoryManager; class KMemoryManager;
class KPageBuffer; class KPageBuffer;
@ -130,6 +131,9 @@ public:
/// Retrieves a const pointer to the current process. /// Retrieves a const pointer to the current process.
const KProcess* CurrentProcess() const; const KProcess* CurrentProcess() const;
/// Closes the current process.
void CloseCurrentProcess();
/// Retrieves the list of processes. /// Retrieves the list of processes.
const std::vector<KProcess*>& GetProcessList() const; const std::vector<KProcess*>& GetProcessList() const;
@ -238,6 +242,12 @@ public:
/// Gets the virtual memory manager for the kernel. /// Gets the virtual memory manager for the kernel.
const KMemoryManager& MemoryManager() const; const KMemoryManager& MemoryManager() const;
/// Gets the application memory block manager for the kernel.
KMemoryBlockSlabManager& GetApplicationMemoryBlockManager();
/// Gets the application memory block manager for the kernel.
const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const;
/// Gets the shared memory object for HID services. /// Gets the shared memory object for HID services.
Kernel::KSharedMemory& GetHidSharedMem(); Kernel::KSharedMemory& GetHidSharedMem();
View File

@ -933,7 +933,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han
return ResultSuccess; return ResultSuccess;
case GetInfoType::UserExceptionContextAddr: case GetInfoType::UserExceptionContextAddr:
*result = process->GetTLSRegionAddress(); *result = process->GetProcessLocalRegionAddress();
return ResultSuccess; return ResultSuccess;
case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource: case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
@ -1888,7 +1888,7 @@ static void ExitProcess(Core::System& system) {
auto* current_process = system.Kernel().CurrentProcess(); auto* current_process = system.Kernel().CurrentProcess();
LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running, ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
"Process has already exited"); "Process has already exited");
system.Exit(); system.Exit();
@ -2557,7 +2557,7 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand
return ResultInvalidEnumValue; return ResultInvalidEnumValue;
} }
*out = static_cast<u64>(process->GetStatus()); *out = static_cast<u64>(process->GetState());
return ResultSuccess; return ResultSuccess;
} }
View File

@ -14,8 +14,11 @@ namespace Kernel::Svc {
using namespace Common::Literals; using namespace Common::Literals;
constexpr s32 ArgumentHandleCountMax = 0x40; constexpr inline s32 ArgumentHandleCountMax = 0x40;
constexpr u32 HandleWaitMask{1u << 30};
constexpr inline u32 HandleWaitMask = 1u << 30;
constexpr inline s64 WaitInfinite = -1;
constexpr inline std::size_t HeapSizeAlignment = 2_MiB; constexpr inline std::size_t HeapSizeAlignment = 2_MiB;
View File

@ -95,6 +95,19 @@ constexpr inline s32 IdealCoreNoUpdate = -3;
constexpr inline s32 LowestThreadPriority = 63; constexpr inline s32 LowestThreadPriority = 63;
constexpr inline s32 HighestThreadPriority = 0; constexpr inline s32 HighestThreadPriority = 0;
constexpr inline s32 SystemThreadPriorityHighest = 16;
enum class ProcessState : u32 {
Created = 0,
CreatedAttached = 1,
Running = 2,
Crashed = 3,
RunningAttached = 4,
Terminating = 5,
Terminated = 6,
DebugBreak = 7,
};
constexpr inline size_t ThreadLocalRegionSize = 0x200; constexpr inline size_t ThreadLocalRegionSize = 0x200;
} // namespace Kernel::Svc } // namespace Kernel::Svc
View File

@ -135,6 +135,14 @@ union Result {
[[nodiscard]] constexpr bool IsFailure() const { [[nodiscard]] constexpr bool IsFailure() const {
return !IsSuccess(); return !IsSuccess();
} }
[[nodiscard]] constexpr u32 GetInnerValue() const {
return static_cast<u32>(module.Value()) | (description << module.bits);
}
[[nodiscard]] constexpr bool Includes(Result result) const {
return GetInnerValue() == result.GetInnerValue();
}
}; };
static_assert(std::is_trivial_v<Result>); static_assert(std::is_trivial_v<Result>);
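GetInnerValue() packs module and description into a single u32 by shifting the description past the module field, and Includes() compares the packed pairs. A sketch of that packing, assuming the 9-bit module width used by Horizon-style result codes (the real shift comes from module.bits):

```cpp
#include <cstdint>

constexpr std::uint32_t ModuleBits = 9; // assumed field width

constexpr std::uint32_t GetInnerValue(std::uint32_t module, std::uint32_t description) {
    return module | (description << ModuleBits);
}

constexpr bool Includes(std::uint32_t lhs, std::uint32_t rhs) {
    // Two results match when their module/description pairs are identical.
    return lhs == rhs;
}

static_assert(GetInnerValue(1, 59) == (1u | (59u << 9)));
static_assert(Includes(GetInnerValue(1, 59), GetInnerValue(1, 59)));

int main() {
    return 0;
}
```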
@ -462,9 +470,6 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess;
#define R_UNLESS(expr, res) \ #define R_UNLESS(expr, res) \
{ \ { \
if (!(expr)) { \ if (!(expr)) { \
if (res.IsError()) { \
LOG_ERROR(Kernel, "Failed with result: {}", res.raw); \
} \
R_THROW(res); \ R_THROW(res); \
} \ } \
} }
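With the error logging removed, R_UNLESS is a pure guard: evaluate the condition and propagate the result on failure. A reduced sketch of the macro pattern with stand-in Result values and a hypothetical caller:

```cpp
#include <cstddef>

using Result = int;
constexpr Result ResultSuccess = 0;
constexpr Result ResultInvalidSize = 0x101; // hypothetical error value

// Simplified forms: R_THROW returns from the enclosing function,
// R_UNLESS guards a condition.
#define R_THROW(res) return (res)
#define R_UNLESS(expr, res)                                                    \
    {                                                                          \
        if (!(expr)) {                                                         \
            R_THROW(res);                                                      \
        }                                                                      \
    }

Result MapMemory(std::size_t size) {
    // Guard clauses read top to bottom; failures propagate silently.
    R_UNLESS(size > 0, ResultInvalidSize);
    return ResultSuccess;
}

int main() {
    return MapMemory(0) == ResultInvalidSize ? 0 : 1;
}
```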
View File

@ -290,7 +290,7 @@ public:
const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize}; const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
const auto start_info{page_table.QueryInfo(start - 1)}; const auto start_info{page_table.QueryInfo(start - 1)};
if (start_info.state != Kernel::KMemoryState::Free) { if (start_info.GetState() != Kernel::KMemoryState::Free) {
return {}; return {};
} }
@ -300,7 +300,7 @@ public:
const auto end_info{page_table.QueryInfo(start + size)}; const auto end_info{page_table.QueryInfo(start + size)};
if (end_info.state != Kernel::KMemoryState::Free) { if (end_info.GetState() != Kernel::KMemoryState::Free) {
return {}; return {};
} }
View File

@ -128,7 +128,8 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
} }
ASSERT(system.CurrentProcess() ASSERT(system.CurrentProcess()
->PageTable() ->PageTable()
.LockForDeviceAddressSpace(handle_description->address, handle_description->size) .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
Kernel::KMemoryPermission::None, true)
.IsSuccess()); .IsSuccess());
std::memcpy(output.data(), &params, sizeof(params)); std::memcpy(output.data(), &params, sizeof(params));
return result; return result;
View File

@ -65,7 +65,7 @@ struct Memory::Impl {
return {}; return {};
} }
return system.DeviceMemory().GetPointer(paddr) + vaddr; return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
} }
[[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const { [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
@ -75,7 +75,7 @@ struct Memory::Impl {
return {}; return {};
} }
return system.DeviceMemory().GetPointer(paddr) + vaddr; return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
} }
u8 Read8(const VAddr addr) { u8 Read8(const VAddr addr) {
@ -499,7 +499,7 @@ struct Memory::Impl {
} else { } else {
while (base != end) { while (base != end) {
page_table.pointers[base].Store( page_table.pointers[base].Store(
system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type); system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type);
page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS); page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);
ASSERT_MSG(page_table.pointers[base].Pointer(), ASSERT_MSG(page_table.pointers[base].Pointer(),
View File

@ -40,9 +40,6 @@ struct ScopeInit final {
core_timing.SetMulticore(true); core_timing.SetMulticore(true);
core_timing.Initialize([]() {}); core_timing.Initialize([]() {});
} }
~ScopeInit() {
core_timing.Shutdown();
}
Core::Timing::CoreTiming core_timing; Core::Timing::CoreTiming core_timing;
}; };
View File

@ -59,11 +59,12 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) { std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) {
return query_pool == *pool; return query_pool == *pool;
}); });
ASSERT(it != std::end(pools));
if (it != std::end(pools)) {
const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it); const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false; usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
} }
}
QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_, QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
Scheduler& scheduler_) Scheduler& scheduler_)
View File

@ -120,8 +120,8 @@ void EmuThread::run() {
} }
} }
// Shutdown the core emulation // Shutdown the main emulated process
system.Shutdown(); system.ShutdownMainProcess();
#if MICROPROFILE_ENABLED #if MICROPROFILE_ENABLED
MicroProfileOnThreadExit(); MicroProfileOnThreadExit();
View File

@ -294,6 +294,7 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan
#ifdef __linux__ #ifdef __linux__
SetupSigInterrupts(); SetupSigInterrupts();
#endif #endif
system->Initialize();
Common::Log::Initialize(); Common::Log::Initialize();
LoadTranslation(); LoadTranslation();
@ -1894,8 +1895,6 @@ void GMainWindow::OnGameListOpenFolder(u64 program_id, GameListOpenTarget target
case GameListOpenTarget::SaveData: { case GameListOpenTarget::SaveData: {
open_target = tr("Save Data"); open_target = tr("Save Data");
const auto nand_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir); const auto nand_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir);
auto vfs_nand_dir =
vfs->OpenDirectory(Common::FS::PathToUTF8String(nand_dir), FileSys::Mode::Read);
if (has_user_save) { if (has_user_save) {
// User save data // User save data
@ -1922,15 +1921,15 @@ void GMainWindow::OnGameListOpenFolder(u64 program_id, GameListOpenTarget target
ASSERT(user_id); ASSERT(user_id);
const auto user_save_data_path = FileSys::SaveDataFactory::GetFullPath( const auto user_save_data_path = FileSys::SaveDataFactory::GetFullPath(
*system, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser, *system, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData,
FileSys::SaveDataType::SaveData, program_id, user_id->AsU128(), 0); program_id, user_id->AsU128(), 0);
path = Common::FS::ConcatPathSafe(nand_dir, user_save_data_path); path = Common::FS::ConcatPathSafe(nand_dir, user_save_data_path);
} else { } else {
// Device save data // Device save data
const auto device_save_data_path = FileSys::SaveDataFactory::GetFullPath( const auto device_save_data_path = FileSys::SaveDataFactory::GetFullPath(
*system, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser, *system, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData,
FileSys::SaveDataType::SaveData, program_id, {}, 0); program_id, {}, 0);
path = Common::FS::ConcatPathSafe(nand_dir, device_save_data_path); path = Common::FS::ConcatPathSafe(nand_dir, device_save_data_path);
} }
View File

@ -302,6 +302,8 @@ int main(int argc, char** argv) {
} }
Core::System system{}; Core::System system{};
system.Initialize();
InputCommon::InputSubsystem input_subsystem{}; InputCommon::InputSubsystem input_subsystem{};
// Apply the command line arguments // Apply the command line arguments
@ -392,7 +394,7 @@ int main(int argc, char** argv) {
} }
system.DetachDebugger(); system.DetachDebugger();
void(system.Pause()); void(system.Pause());
system.Shutdown(); system.ShutdownMainProcess();
detached_tasks.WaitForAllTasks(); detached_tasks.WaitForAllTasks();
return 0; return 0;