early-access version 1660

pineappleEA
2021-05-06 04:45:54 +02:00
parent 37b353b187
commit 46973c4047
149 changed files with 2168 additions and 1784 deletions

View File

@@ -17,12 +17,12 @@ GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
GlobalSchedulerContext::~GlobalSchedulerContext() = default;
void GlobalSchedulerContext::AddThread(std::shared_ptr<KThread> thread) {
void GlobalSchedulerContext::AddThread(KThread* thread) {
std::scoped_lock lock{global_list_guard};
thread_list.push_back(std::move(thread));
thread_list.push_back(thread);
}
void GlobalSchedulerContext::RemoveThread(std::shared_ptr<KThread> thread) {
void GlobalSchedulerContext::RemoveThread(KThread* thread) {
std::scoped_lock lock{global_list_guard};
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
thread_list.end());
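The switch from std::shared_ptr<KThread> to raw KThread* moves lifetime management onto the objects' own reference counts; the global list only records registration. A minimal sketch of that pattern under stated assumptions (Thread, Registry, and the std::mutex standing in for Common::SpinLock are illustrative names, not yuzu's classes):

#include <algorithm>
#include <mutex>
#include <vector>

class Thread;  // stand-in for KThread; only used through pointers here

class Registry {
public:
    // The registry does not own the threads; callers keep them alive via Open()/Close().
    void Add(Thread* thread) {
        std::scoped_lock lock{guard};
        threads.push_back(thread);
    }
    void Remove(Thread* thread) {
        std::scoped_lock lock{guard};
        threads.erase(std::remove(threads.begin(), threads.end(), thread), threads.end());
    }

private:
    std::vector<Thread*> threads;
    std::mutex guard;  // the real code uses Common::SpinLock
};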

View File

@@ -38,13 +38,13 @@ public:
~GlobalSchedulerContext();
/// Adds a new thread to the scheduler
void AddThread(std::shared_ptr<KThread> thread);
void AddThread(KThread* thread);
/// Removes a thread from the scheduler
void RemoveThread(std::shared_ptr<KThread> thread);
void RemoveThread(KThread* thread);
/// Returns a list of all threads managed by the scheduler
[[nodiscard]] const std::vector<std::shared_ptr<KThread>>& GetThreadList() const {
[[nodiscard]] const std::vector<KThread*>& GetThreadList() const {
return thread_list;
}
@@ -79,7 +79,7 @@ private:
LockType scheduler_lock;
/// Lists all thread ids that aren't deleted/etc.
std::vector<std::shared_ptr<KThread>> thread_list;
std::vector<KThread*> thread_list;
Common::SpinLock global_list_guard{};
};

View File

@@ -14,17 +14,16 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h"
#include "core/memory.h"
@@ -35,28 +34,23 @@ SessionRequestHandler::SessionRequestHandler() = default;
SessionRequestHandler::~SessionRequestHandler() = default;
void SessionRequestHandler::ClientConnected(std::shared_ptr<ServerSession> server_session) {
server_session->SetHleHandler(shared_from_this());
connected_sessions.push_back(std::move(server_session));
void SessionRequestHandler::ClientConnected(KServerSession* session) {
session->SetHleHandler(shared_from_this());
}
void SessionRequestHandler::ClientDisconnected(
const std::shared_ptr<ServerSession>& server_session) {
server_session->SetHleHandler(nullptr);
boost::range::remove_erase(connected_sessions, server_session);
void SessionRequestHandler::ClientDisconnected(KServerSession* session) {
session->SetHleHandler(nullptr);
}
HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
std::shared_ptr<ServerSession> server_session,
std::shared_ptr<KThread> thread)
: server_session(std::move(server_session)),
thread(std::move(thread)), kernel{kernel}, memory{memory} {
HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
KServerSession* server_session_, KThread* thread_)
: server_session(server_session_), thread(thread_), kernel{kernel_}, memory{memory_} {
cmd_buf[0] = 0;
}
HLERequestContext::~HLERequestContext() = default;
void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf,
void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf,
bool incoming) {
IPC::RequestParser rp(src_cmdbuf);
command_header = rp.PopRaw<IPC::CommandHeader>();
@@ -77,12 +71,12 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_
for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_copy; ++handle) {
const u32 copy_handle{rp.Pop<Handle>()};
copy_handles.push_back(copy_handle);
copy_objects.push_back(handle_table.GetGeneric(copy_handle));
copy_objects.push_back(handle_table.GetObject(copy_handle).GetPointerUnsafe());
}
for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_move; ++handle) {
const u32 move_handle{rp.Pop<Handle>()};
move_handles.push_back(move_handle);
move_objects.push_back(handle_table.GetGeneric(move_handle));
move_objects.push_back(handle_table.GetObject(move_handle).GetPointerUnsafe());
}
} else {
// For responses we just ignore the handles, they're empty and will be populated when
@@ -169,7 +163,7 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_
rp.Skip(1, false); // The command is actually an u64, but we don't use the high part.
}
ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
u32_le* src_cmdbuf) {
ParseCommandBuffer(handle_table, src_cmdbuf, true);
if (command_header->type == IPC::CommandType::Close) {
@@ -223,12 +217,12 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& thread) {
// for specific values in each of these descriptors.
for (auto& object : copy_objects) {
ASSERT(object != nullptr);
dst_cmdbuf[current_offset++] = handle_table.Create(object).Unwrap();
R_TRY(handle_table.Add(&dst_cmdbuf[current_offset++], object));
}
for (auto& object : move_objects) {
ASSERT(object != nullptr);
dst_cmdbuf[current_offset++] = handle_table.Create(object).Unwrap();
R_TRY(handle_table.Add(&dst_cmdbuf[current_offset++], object));
}
}
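The handle-writing loops now propagate failures instead of unwrapping: handle_table.Add writes the new handle through an out pointer and returns a ResultCode, and R_TRY bails out of the enclosing function on error. A simplified, self-contained sketch of that shape (ResultCode, R_TRY, and AddToHandleTable here are stand-ins, not the kernel's real definitions):

#include <cstddef>
#include <cstdint>

struct ResultCode {
    std::uint32_t raw;
    constexpr bool IsError() const { return raw != 0; }
};
constexpr ResultCode ResultSuccess{0};

#define R_TRY(expr)                    \
    do {                               \
        const ResultCode _rc = (expr); \
        if (_rc.IsError()) {           \
            return _rc;                \
        }                              \
    } while (0)

class Object;

// Assumed helper: a real implementation would reserve a slot in the handle table.
ResultCode AddToHandleTable(std::uint32_t* out_handle, Object* obj) {
    (void)obj;
    *out_handle = 0;
    return ResultSuccess;
}

ResultCode WriteHandles(std::uint32_t* dst_cmdbuf, Object* const* objects, std::size_t count) {
    for (std::size_t i = 0; i < count; ++i) {
        R_TRY(AddToHandleTable(&dst_cmdbuf[i], objects[i]));  // abort the response on failure
    }
    return ResultSuccess;
}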

View File

@@ -16,7 +16,8 @@
#include "common/concepts.h"
#include "common/swap.h"
#include "core/hle/ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/svc_common.h"
union ResultCode;
@@ -35,13 +36,14 @@ class ServiceFrameworkBase;
namespace Kernel {
class Domain;
class HandleTable;
class HLERequestContext;
class KernelCore;
class Process;
class ServerSession;
class KHandleTable;
class KProcess;
class KServerSession;
class KThread;
class KReadableEvent;
class KSession;
class KWritableEvent;
enum class ThreadWakeupReason;
@@ -71,20 +73,14 @@ public:
* associated ServerSession alive for the duration of the connection.
* @param server_session Owning pointer to the ServerSession associated with the connection.
*/
void ClientConnected(std::shared_ptr<ServerSession> server_session);
void ClientConnected(KServerSession* session);
/**
* Signals that a client has just disconnected from this HLE handler and releases the
* associated ServerSession.
* @param server_session ServerSession associated with the connection.
*/
void ClientDisconnected(const std::shared_ptr<ServerSession>& server_session);
protected:
/// List of sessions that are connected to this handler.
/// A ServerSession whose server endpoint is an HLE implementation is kept alive by this list
/// for the duration of the connection.
std::vector<std::shared_ptr<ServerSession>> connected_sessions;
void ClientDisconnected(KServerSession* session);
};
/**
@@ -109,8 +105,7 @@ protected:
class HLERequestContext {
public:
explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
std::shared_ptr<ServerSession> session,
std::shared_ptr<KThread> thread);
KServerSession* session, KThread* thread);
~HLERequestContext();
/// Returns a pointer to the IPC command buffer for this request.
@@ -122,12 +117,12 @@ public:
* Returns the session through which this request was made. This can be used as a map key to
* access per-client data on services.
*/
const std::shared_ptr<Kernel::ServerSession>& Session() const {
Kernel::KServerSession* Session() {
return server_session;
}
/// Populates this context with data from the requesting process/thread.
ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
ResultCode PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
u32_le* src_cmdbuf);
/// Writes data from this context back to the requesting process/thread.
@@ -218,22 +213,12 @@ public:
return move_handles.at(index);
}
template <typename T>
std::shared_ptr<T> GetCopyObject(std::size_t index) {
return DynamicObjectCast<T>(copy_objects.at(index));
void AddMoveObject(KAutoObject* object) {
move_objects.emplace_back(object);
}
template <typename T>
std::shared_ptr<T> GetMoveObject(std::size_t index) {
return DynamicObjectCast<T>(move_objects.at(index));
}
void AddMoveObject(std::shared_ptr<Object> object) {
move_objects.emplace_back(std::move(object));
}
void AddCopyObject(std::shared_ptr<Object> object) {
copy_objects.emplace_back(std::move(object));
void AddCopyObject(KAutoObject* object) {
copy_objects.emplace_back(object);
}
void AddDomainObject(std::shared_ptr<SessionRequestHandler> object) {
@@ -276,10 +261,6 @@ public:
return *thread;
}
const KThread& GetThread() const {
return *thread;
}
bool IsThreadWaiting() const {
return is_thread_waiting;
}
@@ -287,16 +268,17 @@ public:
private:
friend class IPC::ResponseBuilder;
void ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, bool incoming);
void ParseCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf, bool incoming);
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
std::shared_ptr<Kernel::ServerSession> server_session;
std::shared_ptr<KThread> thread;
Kernel::KServerSession* server_session{};
KThread* thread;
// TODO(yuriks): Check common usage of this and optimize size accordingly
boost::container::small_vector<Handle, 8> move_handles;
boost::container::small_vector<Handle, 8> copy_handles;
boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects;
boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects;
boost::container::small_vector<KAutoObject*, 8> move_objects;
boost::container::small_vector<KAutoObject*, 8> copy_objects;
boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects;
std::optional<IPC::CommandHeader> command_header;

View File

@@ -7,12 +7,13 @@
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_linked_list.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/svc_results.h"
#include "core/memory.h"
@@ -107,8 +108,8 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
// Wait for the address.
{
std::shared_ptr<KThread> owner_thread;
ASSERT(!owner_thread);
KScopedAutoObject<KThread> owner_thread;
ASSERT(owner_thread.IsNull());
{
KScopedSchedulerLock sl(kernel);
cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
@@ -126,8 +127,10 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);
// Get the lock owner thread.
owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(handle);
R_UNLESS(owner_thread, ResultInvalidHandle);
owner_thread =
kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(
handle);
R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle);
// Update the lock.
cur_thread->SetAddressKey(addr, value);
@@ -137,7 +140,7 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
cur_thread->SetMutexWaitAddressForDebugging(addr);
}
}
ASSERT(owner_thread);
ASSERT(owner_thread.IsNotNull());
}
// Remove the thread as a waiter from the lock owner.
@@ -176,19 +179,22 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
KThread* thread_to_close = nullptr;
if (can_access) {
if (prev_tag == InvalidHandle) {
if (prev_tag == Svc::InvalidHandle) {
// If nobody held the lock previously, we're all good.
thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
thread->Wakeup();
} else {
// Get the previous owner.
auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(
prev_tag & ~Svc::HandleWaitMask);
KThread* owner_thread = kernel.CurrentProcess()
->GetHandleTable()
.GetObjectWithoutPseudoHandle<KThread>(
static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
.ReleasePointerUnsafe();
if (owner_thread) {
// Add the thread as a waiter on the owner.
owner_thread->AddWaiter(thread);
thread_to_close = owner_thread.get();
thread_to_close = owner_thread;
} else {
// The lock was tagged with a thread that doesn't exist.
thread->SetSyncedObject(nullptr, ResultInvalidState);
@@ -208,9 +214,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
// Prepare for signaling.
constexpr int MaxThreads = 16;
// TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using
// std::shared_ptr.
std::vector<std::shared_ptr<KThread>> thread_list;
KLinkedList<KThread> thread_list{kernel};
std::array<KThread*, MaxThreads> thread_array;
s32 num_to_close{};
@@ -228,7 +232,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
if (num_to_close < MaxThreads) {
thread_array[num_to_close++] = thread;
} else {
thread_list.push_back(SharedFrom(thread));
thread_list.push_back(*thread);
}
}
@@ -250,8 +254,9 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
}
// Close threads in the list.
for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
(*it)->Close();
for (auto it = thread_list.begin(); it != thread_list.end();
it = thread_list.erase(kernel, it)) {
(*it).Close();
}
}
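KScopedAutoObject replaces the shared_ptr here: it holds one reference for the duration of the scope and exposes IsNull()/IsNotNull() checks, with ReleasePointerUnsafe() handing the reference off to the caller. A minimal RAII holder in that spirit, assuming Open()/Close() reference counting (ScopedRef is an illustrative name; move support and the real class's pseudo-handle handling are omitted):

template <typename T>
class ScopedRef {
public:
    ScopedRef() = default;
    explicit ScopedRef(T* obj_) : obj{obj_} {
        if (obj != nullptr) {
            obj->Open();  // hold a reference while this scope is alive
        }
    }
    ~ScopedRef() {
        if (obj != nullptr) {
            obj->Close();  // release; the object destroys itself at refcount zero
        }
    }
    ScopedRef(const ScopedRef&) = delete;
    ScopedRef& operator=(const ScopedRef&) = delete;

    bool IsNull() const { return obj == nullptr; }
    bool IsNotNull() const { return obj != nullptr; }
    T* operator->() const { return obj; }
    T* ReleaseUnsafe() {  // hand the raw pointer (and its reference) to the caller
        T* ret = obj;
        obj = nullptr;
        return ret;
    }

private:
    T* obj{};
};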

View File

@@ -3,30 +3,53 @@
// Refer to the license.txt file included.
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
namespace Kernel {
KEvent::KEvent(KernelCore& kernel, std::string&& name) : Object{kernel, std::move(name)} {}
KEvent::KEvent(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, readable_event{kernel}, writable_event{kernel} {}
KEvent::~KEvent() = default;
std::shared_ptr<KEvent> KEvent::Create(KernelCore& kernel, std::string&& name) {
return std::make_shared<KEvent>(kernel, std::move(name));
}
void KEvent::Initialize(std::string&& name_) {
// Increment reference count.
// Because reference count is one on creation, this will result
// in a reference count of two. Thus, when both readable and
// writable events are closed this object will be destroyed.
Open();
void KEvent::Initialize() {
// Create our sub events.
readable_event = std::make_shared<KReadableEvent>(kernel, GetName() + ":Readable");
writable_event = std::make_shared<KWritableEvent>(kernel, GetName() + ":Writable");
KAutoObject::Create(std::addressof(readable_event));
KAutoObject::Create(std::addressof(writable_event));
// Initialize our sub sessions.
readable_event->Initialize(this);
writable_event->Initialize(this);
readable_event.Initialize(this, name_ + ":Readable");
writable_event.Initialize(this, name_ + ":Writable");
// Set our owner process.
owner = kernel.CurrentProcess();
if (owner) {
owner->Open();
}
// Mark initialized.
name = std::move(name_);
initialized = true;
}
void KEvent::Finalize() {
KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList>::Finalize();
}
void KEvent::PostDestroy(uintptr_t arg) {
// Release the event count resource the owner process holds.
KProcess* owner = reinterpret_cast<KProcess*>(arg);
if (owner) {
owner->GetResourceLimit()->Release(LimitableResource::Events, 1);
owner->Close();
}
}
} // namespace Kernel
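The PostDestroy hook receives the owning process as a plain uintptr_t so it can release the event-count resource after the object itself is gone. A self-contained illustration of that hand-off (Process, ReleaseEventSlot, and Close are stand-ins with trivial bodies, not the real KProcess API):

#include <cstdint>

class Process {
public:
    void ReleaseEventSlot() { ++free_events; }  // stands in for resource-limit Release()
    void Close() { --refcount; }                // stands in for dropping the Open() reference

private:
    int free_events{};
    int refcount{1};
};

struct Event {
    static void PostDestroy(std::uintptr_t arg) {
        auto* owner = reinterpret_cast<Process*>(arg);
        if (owner != nullptr) {
            owner->ReleaseEventSlot();
            owner->Close();
        }
    }
};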

View File

@@ -4,53 +4,54 @@
#pragma once
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
class KernelCore;
class KReadableEvent;
class KWritableEvent;
class KProcess;
class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KEvent, KAutoObject);
class KEvent final : public Object {
public:
explicit KEvent(KernelCore& kernel, std::string&& name);
~KEvent() override;
explicit KEvent(KernelCore& kernel);
virtual ~KEvent();
static std::shared_ptr<KEvent> Create(KernelCore& kernel, std::string&& name);
void Initialize(std::string&& name);
void Initialize();
virtual void Finalize() override;
void Finalize() override {}
std::string GetTypeName() const override {
return "KEvent";
virtual bool IsInitialized() const override {
return initialized;
}
static constexpr HandleType HANDLE_TYPE = HandleType::Event;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
virtual uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(owner);
}
std::shared_ptr<KReadableEvent>& GetReadableEvent() {
static void PostDestroy(uintptr_t arg);
virtual KProcess* GetOwner() const override {
return owner;
}
KReadableEvent& GetReadableEvent() {
return readable_event;
}
std::shared_ptr<KWritableEvent>& GetWritableEvent() {
return writable_event;
}
const std::shared_ptr<KReadableEvent>& GetReadableEvent() const {
return readable_event;
}
const std::shared_ptr<KWritableEvent>& GetWritableEvent() const {
KWritableEvent& GetWritableEvent() {
return writable_event;
}
private:
std::shared_ptr<KReadableEvent> readable_event;
std::shared_ptr<KWritableEvent> writable_event;
KReadableEvent readable_event;
KWritableEvent writable_event;
KProcess* owner{};
bool initialized{};
};

View File

@@ -134,6 +134,10 @@ enum class KMemoryPermission : u8 {
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission);
constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission perm) {
return static_cast<KMemoryPermission>(perm);
}
enum class KMemoryAttribute : u8 {
None = 0x00,
Mask = 0x7F,
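ConvertToKMemoryPermission is a plain static_cast, which is only sound while the Svc and kernel permission enums keep the same underlying values. A self-contained illustration of that assumption with stand-in enums (not the real definitions):

#include <cstdint>

enum class SvcPerm : std::uint8_t { None = 0, Read = 1, Write = 2, ReadWrite = 3 };
enum class KernPerm : std::uint8_t { None = 0, Read = 1, Write = 2, ReadWrite = 3 };

constexpr KernPerm Convert(SvcPerm perm) {
    return static_cast<KernPerm>(perm);  // valid only because the encodings match
}

static_assert(Convert(SvcPerm::ReadWrite) == KernPerm::ReadWrite);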

View File

@@ -11,11 +11,11 @@
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_results.h"
#include "core/memory.h"
@@ -420,7 +420,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
remaining_size);
if (!memory_reservation.Succeeded()) {
LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
return ResultResourceLimitedExceeded;
return ResultLimitReached;
}
KPageLinkedList page_linked_list;
@@ -578,7 +578,7 @@ ResultCode KPageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
AddRegionToPages(dst_addr, num_pages, dst_pages);
if (!dst_pages.IsEqual(src_pages)) {
return ResultInvalidMemoryRange;
return ResultInvalidMemoryRegion;
}
{
@@ -641,6 +641,45 @@ ResultCode KPageTable::MapPages(VAddr addr, KPageLinkedList& page_linked_list, K
return RESULT_SUCCESS;
}
ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
VAddr cur_addr{addr};
for (const auto& node : page_linked_list.Nodes()) {
const std::size_t num_pages{(addr - cur_addr) / PageSize};
if (const auto result{
Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)};
result.IsError()) {
return result;
}
cur_addr += node.GetNumPages() * PageSize;
}
return RESULT_SUCCESS;
}
ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
KMemoryState state) {
std::lock_guard lock{page_table_lock};
const std::size_t num_pages{page_linked_list.GetNumPages()};
const std::size_t size{num_pages * PageSize};
if (!CanContain(addr, size, state)) {
return ResultInvalidCurrentMemory;
}
if (IsRegionMapped(addr, num_pages * PageSize)) {
return ResultInvalidCurrentMemory;
}
CASCADE_CODE(UnmapPages(addr, page_linked_list));
block_manager->Update(addr, num_pages, state, KMemoryPermission::None);
return RESULT_SUCCESS;
}
ResultCode KPageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size,
KMemoryPermission perm) {
@@ -790,7 +829,7 @@ ResultVal<VAddr> KPageTable::SetHeapSize(std::size_t size) {
if (!memory_reservation.Succeeded()) {
LOG_ERROR(Kernel, "Could not reserve heap extension of size {:X} bytes", delta);
return ResultResourceLimitedExceeded;
return ResultLimitReached;
}
KPageLinkedList page_linked_list;
@@ -1067,7 +1106,7 @@ constexpr std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
}
}
constexpr bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const {
bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const {
const VAddr end{addr + size};
const VAddr last{end - 1};
const VAddr region_start{GetRegionAddress(state)};
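The new UnmapPages walks the page linked list and issues one unmap per node before the public overload updates the block manager. A simplified sketch of that kind of walk under assumed types (PageGroup and UnmapRange are illustrative; the real code dispatches through Operate() and tracks the cursor with the node page counts):

#include <cstddef>
#include <vector>

constexpr std::size_t PageSize = 0x1000;

struct PageGroup {
    std::size_t num_pages;
};

// Assumed primitive: unmap num_pages pages starting at addr.
void UnmapRange(std::size_t addr, std::size_t num_pages) {
    (void)addr;
    (void)num_pages;  // illustrative no-op
}

void UnmapAll(std::size_t base_addr, const std::vector<PageGroup>& groups) {
    std::size_t cur_addr = base_addr;
    for (const auto& group : groups) {
        UnmapRange(cur_addr, group.num_pages);   // unmap this contiguous run
        cur_addr += group.num_pages * PageSize;  // advance the cursor past it
    }
}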

View File

@@ -40,6 +40,7 @@ public:
ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
KMemoryPermission perm);
ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, KMemoryPermission perm);
KMemoryInfo QueryInfo(VAddr addr);
ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
@@ -63,6 +64,8 @@ public:
return page_table_impl;
}
bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
private:
enum class OperationType : u32 {
Map,
@@ -79,6 +82,7 @@ private:
ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
KMemoryPermission perm);
ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list);
void MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end);
bool IsRegionMapped(VAddr address, u64 size);
bool IsRegionContiguous(VAddr addr, u64 size) const;
@@ -92,7 +96,6 @@ private:
OperationType operation, PAddr map_addr = 0);
constexpr VAddr GetRegionAddress(KMemoryState state) const;
constexpr std::size_t GetRegionSize(KMemoryState state) const;
constexpr bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
constexpr ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
@@ -216,8 +219,6 @@ public:
constexpr PAddr GetPhysicalAddr(VAddr addr) {
return page_table_impl.backing_addr[addr >> PageBits] + addr;
}
private:
constexpr bool Contains(VAddr addr) const {
return address_space_start <= addr && addr <= address_space_end - 1;
}
@@ -225,6 +226,8 @@ private:
return address_space_start <= addr && addr < addr + size &&
addr + size - 1 <= address_space_end - 1;
}
private:
constexpr bool IsKernel() const {
return is_kernel;
}

View File

@@ -127,7 +127,6 @@ ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::st
process->resource_limit = kernel.GetSystemResourceLimit();
process->status = ProcessStatus::Created;
process->program_id = 0;
process->system = &system;
process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
: kernel.CreateNewUserProcessID();
process->capabilities.InitializeForMetadatalessProcess();
@@ -368,7 +367,7 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
ChangeStatus(ProcessStatus::Running);
SetupMainThread(*system, *this, main_thread_priority, main_thread_stack_top);
SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
}
void KProcess::PrepareForTermination() {
@@ -390,7 +389,7 @@ void KProcess::PrepareForTermination() {
}
};
stop_threads(system->GlobalSchedulerContext().GetThreadList());
stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
FreeTLSRegion(tls_region_address);
tls_region_address = 0;
@@ -438,7 +437,7 @@ VAddr KProcess::CreateTLSRegion() {
const VAddr start{page_table->GetKernelMapRegionStart()};
const VAddr size{page_table->GetKernelMapRegionEnd() - start};
const PAddr tls_map_addr{system->DeviceMemory().GetPhysicalAddr(tls_page_ptr)};
const PAddr tls_map_addr{kernel.System().DeviceMemory().GetPhysicalAddr(tls_page_ptr)};
const VAddr tls_page_addr{page_table
->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize,
KMemoryState::ThreadLocal,
@@ -479,7 +478,8 @@ void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission);
};
system->Memory().WriteBlock(*this, base_addr, code_set.memory.data(), code_set.memory.size());
kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
code_set.memory.size());
ReprotectSegment(code_set.CodeSegment(), KMemoryPermission::ReadAndExecute);
ReprotectSegment(code_set.RODataSegment(), KMemoryPermission::Read);
@@ -492,9 +492,9 @@ bool KProcess::IsSignaled() const {
}
KProcess::KProcess(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, page_table{std::make_unique<KPageTable>(
*system)},
handle_table{kernel}, address_arbiter{*system}, condition_var{*system}, state_lock{kernel} {}
: KAutoObjectWithSlabHeapAndContainer{kernel},
page_table{std::make_unique<KPageTable>(kernel.System())}, handle_table{kernel},
address_arbiter{kernel.System()}, condition_var{kernel.System()}, state_lock{kernel} {}
KProcess::~KProcess() = default;

View File

@@ -477,8 +477,6 @@ private:
KThread* exception_thread{};
Core::System* system{};
KLightLock state_lock;
};

View File

@@ -2,21 +2,18 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/logging/log.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
KReadableEvent::KReadableEvent(KernelCore& kernel, std::string&& name)
: KSynchronizationObject{kernel, std::move(name)} {}
KReadableEvent::KReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {}
KReadableEvent::~KReadableEvent() = default;
bool KReadableEvent::IsSignaled() const {
@@ -25,6 +22,12 @@ bool KReadableEvent::IsSignaled() const {
return is_signaled;
}
void KReadableEvent::Destroy() {
if (parent) {
parent->Close();
}
}
ResultCode KReadableEvent::Signal() {
KScopedSchedulerLock lk{kernel};

View File

@@ -4,8 +4,9 @@
#pragma once
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
@@ -13,31 +14,25 @@ namespace Kernel {
class KernelCore;
class KEvent;
class KReadableEvent final : public KSynchronizationObject {
class KReadableEvent : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject);
public:
explicit KReadableEvent(KernelCore& kernel, std::string&& name);
explicit KReadableEvent(KernelCore& kernel);
~KReadableEvent() override;
std::string GetTypeName() const override {
return "KReadableEvent";
}
static constexpr HandleType HANDLE_TYPE = HandleType::ReadableEvent;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
void Initialize(KEvent* parent_, std::string&& name_) {
is_signaled = false;
parent = parent_;
name = std::move(name_);
}
KEvent* GetParent() const {
return parent;
}
void Initialize(KEvent* parent_) {
is_signaled = false;
parent = parent_;
}
bool IsSignaled() const override;
void Finalize() override {}
virtual bool IsSignaled() const override;
virtual void Destroy() override;
ResultCode Signal();
ResultCode Clear();

View File

@@ -10,10 +10,16 @@
namespace Kernel {
constexpr s64 DefaultTimeout = 10000000000; // 10 seconds
KResourceLimit::KResourceLimit(KernelCore& kernel, const Core::Timing::CoreTiming& core_timing_)
: Object{kernel}, lock{kernel}, cond_var{kernel}, core_timing(core_timing_) {}
KResourceLimit::KResourceLimit(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, lock{kernel}, cond_var{kernel} {}
KResourceLimit::~KResourceLimit() = default;
void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing_) {
core_timing = core_timing_;
}
void KResourceLimit::Finalize() {}
s64 KResourceLimit::GetLimitValue(LimitableResource which) const {
const auto index = static_cast<std::size_t>(which);
s64 value{};
@@ -78,7 +84,7 @@ ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
}
bool KResourceLimit::Reserve(LimitableResource which, s64 value) {
return Reserve(which, value, core_timing.GetGlobalTimeNs().count() + DefaultTimeout);
return Reserve(which, value, core_timing->GetGlobalTimeNs().count() + DefaultTimeout);
}
bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
@@ -109,7 +115,7 @@ bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
}
if (current_hints[index] + value <= limit_values[index] &&
(timeout < 0 || core_timing.GetGlobalTimeNs().count() < timeout)) {
(timeout < 0 || core_timing->GetGlobalTimeNs().count() < timeout)) {
waiter_count++;
cond_var.Wait(&lock, timeout);
waiter_count--;

View File

@@ -8,7 +8,6 @@
#include "common/common_types.h"
#include "core/hle/kernel/k_light_condition_variable.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/object.h"
union ResultCode;
@@ -32,10 +31,16 @@ constexpr bool IsValidResourceType(LimitableResource type) {
return type < LimitableResource::Count;
}
class KResourceLimit final : public Object {
class KResourceLimit final
: public KAutoObjectWithSlabHeapAndContainer<KResourceLimit, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject);
public:
explicit KResourceLimit(KernelCore& kernel, const Core::Timing::CoreTiming& core_timing_);
~KResourceLimit();
explicit KResourceLimit(KernelCore& kernel);
virtual ~KResourceLimit();
void Initialize(const Core::Timing::CoreTiming* core_timing_);
virtual void Finalize() override;
s64 GetLimitValue(LimitableResource which) const;
s64 GetCurrentValue(LimitableResource which) const;
@@ -49,19 +54,7 @@ public:
void Release(LimitableResource which, s64 value);
void Release(LimitableResource which, s64 value, s64 hint);
std::string GetTypeName() const override {
return "KResourceLimit";
}
std::string GetName() const override {
return GetTypeName();
}
static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
virtual void Finalize() override {}
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
private:
using ResourceArray = std::array<s64, static_cast<std::size_t>(LimitableResource::Count)>;
@@ -72,6 +65,6 @@ private:
mutable KLightLock lock;
s32 waiter_count{};
KLightConditionVariable cond_var;
const Core::Timing::CoreTiming& core_timing;
const Core::Timing::CoreTiming* core_timing{};
};
} // namespace Kernel
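Holding Core::Timing::CoreTiming by pointer rather than by reference lets the resource limit be constructed empty by the slab allocator and wired up later through Initialize(). A minimal stand-in for that construct-then-Initialize shape (Clock and Limit are illustrative names):

class Clock;  // stands in for Core::Timing::CoreTiming; used only through a pointer

class Limit {
public:
    Limit() = default;  // slab-friendly: no external dependencies required here

    void Initialize(const Clock* clock_) {
        clock = clock_;  // dependencies arrive after construction
    }

private:
    const Clock* clock{};  // pointer instead of reference so it can start unset
};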

View File

@@ -15,12 +15,12 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/cpu_manager.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/time_manager.h"
namespace Kernel {
@@ -71,7 +71,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
}
if (state.should_count_idle) {
if (highest_thread != nullptr) {
if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) {
process->SetRunningThread(core_id, highest_thread, state.idle_count);
}
} else {
@@ -104,7 +104,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
if (top_thread != nullptr) {
// If the thread has no waiters, we need to check if the process has a thread pinned.
if (top_thread->GetNumKernelWaiters() == 0) {
if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
if (KProcess* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
pinned != nullptr && pinned != top_thread) {
// We prefer our parent's pinned thread if possible. However, we also don't
@@ -411,7 +411,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
// Get the current thread and process.
KThread& cur_thread = Kernel::GetCurrentThread(kernel);
Process& cur_process = *kernel.CurrentProcess();
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
@@ -450,7 +450,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
// Get the current thread and process.
KThread& cur_thread = Kernel::GetCurrentThread(kernel);
Process& cur_process = *kernel.CurrentProcess();
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
@@ -538,7 +538,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
// Get the current thread and process.
KThread& cur_thread = Kernel::GetCurrentThread(kernel);
Process& cur_process = *kernel.CurrentProcess();
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
@@ -617,7 +617,12 @@ KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core
state.highest_priority_thread = nullptr;
}
KScheduler::~KScheduler() = default;
KScheduler::~KScheduler() {
if (idle_thread) {
idle_thread->Close();
idle_thread = nullptr;
}
}
KThread* KScheduler::GetCurrentThread() const {
if (auto result = current_thread.load(); result) {
@@ -719,7 +724,7 @@ void KScheduler::ScheduleImpl() {
current_thread.store(next_thread);
Process* const previous_process = system.Kernel().CurrentProcess();
KProcess* const previous_process = system.Kernel().CurrentProcess();
UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -775,7 +780,7 @@ void KScheduler::SwitchToCurrent() {
}
}
void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process) {
void KScheduler::UpdateLastContextSwitchTime(KThread* thread, KProcess* process) {
const u64 prev_switch_ticks = last_context_switch_time;
const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
@@ -792,13 +797,9 @@ void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process)
}
void KScheduler::Initialize() {
std::string name = "Idle Thread Id:" + std::to_string(core_id);
std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
auto thread_res = KThread::CreateThread(
system, ThreadType::Main, name, 0, KThread::IdleThreadPriority, 0,
static_cast<u32>(core_id), 0, nullptr, std::move(init_func), init_func_parameter);
idle_thread = thread_res.Unwrap().get();
idle_thread = KThread::Create(system.Kernel());
ASSERT(KThread::InitializeIdleThread(system, idle_thread, core_id).IsSuccess());
idle_thread->SetName(fmt::format("IdleThread:{}", core_id));
}
KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)

View File

@@ -24,7 +24,7 @@ class System;
namespace Kernel {
class KernelCore;
class Process;
class KProcess;
class SchedulerLock;
class KThread;
@@ -165,7 +165,7 @@ private:
* most recent tick count retrieved. No special arithmetic is
* applied to it.
*/
void UpdateLastContextSwitchTime(KThread* thread, Process* process);
void UpdateLastContextSwitchTime(KThread* thread, KProcess* process);
static void OnSwitch(void* this_scheduler);
void SwitchToCurrent();
@@ -173,12 +173,12 @@ private:
KThread* prev_thread{};
std::atomic<KThread*> current_thread{};
KThread* idle_thread;
KThread* idle_thread{};
std::shared_ptr<Common::Fiber> switch_fiber{};
struct SchedulingState {
std::atomic<bool> needs_scheduling;
std::atomic<bool> needs_scheduling{};
bool interrupt_task_thread_runnable{};
bool should_count_idle{};
u64 idle_count{};

View File

@@ -8,15 +8,14 @@
#pragma once
#include "common/common_types.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/process.h"
namespace Kernel {
class KScopedResourceReservation {
public:
explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r,
s64 v, s64 timeout)
explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v, s64 timeout)
: resource_limit(std::move(l)), value(v), resource(r) {
if (resource_limit && value) {
success = resource_limit->Reserve(resource, value, timeout);
@@ -25,8 +24,7 @@ public:
}
}
explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r,
s64 v = 1)
explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v = 1)
: resource_limit(std::move(l)), value(v), resource(r) {
if (resource_limit && value) {
success = resource_limit->Reserve(resource, value);
@@ -35,10 +33,10 @@ public:
}
}
explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v, s64 t)
explicit KScopedResourceReservation(const KProcess* p, LimitableResource r, s64 v, s64 t)
: KScopedResourceReservation(p->GetResourceLimit(), r, v, t) {}
explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v = 1)
explicit KScopedResourceReservation(const KProcess* p, LimitableResource r, s64 v = 1)
: KScopedResourceReservation(p->GetResourceLimit(), r, v) {}
~KScopedResourceReservation() noexcept {
@@ -58,7 +56,7 @@ public:
}
private:
std::shared_ptr<KResourceLimit> resource_limit;
KResourceLimit* resource_limit{};
s64 value;
LimitableResource resource;
bool success;

View File

@@ -8,7 +8,7 @@
#pragma once
#include "common/common_types.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/time_manager.h"

View File

@@ -8,50 +8,74 @@
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
KSharedMemory::KSharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory)
: Object{kernel}, device_memory{device_memory} {}
KSharedMemory::KSharedMemory(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
KSharedMemory::~KSharedMemory() {
kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size);
}
std::shared_ptr<KSharedMemory> KSharedMemory::Create(
KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
KPageLinkedList&& page_list, KMemoryPermission owner_permission,
KMemoryPermission user_permission, PAddr physical_address, std::size_t size, std::string name) {
ResultCode KSharedMemory::Initialize(KernelCore& kernel_, Core::DeviceMemory& device_memory_,
KProcess* owner_process_, KPageLinkedList&& page_list_,
Svc::MemoryPermission owner_permission_,
Svc::MemoryPermission user_permission_,
PAddr physical_address_, std::size_t size_,
std::string name_) {
// Set members.
owner_process = owner_process_;
device_memory = &device_memory_;
page_list = std::move(page_list_);
owner_permission = owner_permission_;
user_permission = user_permission_;
physical_address = physical_address_;
size = size_;
name = name_;
const auto resource_limit = kernel.GetSystemResourceLimit();
KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
size);
ASSERT(memory_reservation.Succeeded());
// Get the resource limit.
KResourceLimit* reslimit = kernel.GetSystemResourceLimit();
std::shared_ptr<KSharedMemory> shared_memory{
std::make_shared<KSharedMemory>(kernel, device_memory)};
shared_memory->owner_process = owner_process;
shared_memory->page_list = std::move(page_list);
shared_memory->owner_permission = owner_permission;
shared_memory->user_permission = user_permission;
shared_memory->physical_address = physical_address;
shared_memory->size = size;
shared_memory->name = name;
// Reserve memory for ourselves.
KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemory,
size_);
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Commit our reservation.
memory_reservation.Commit();
return shared_memory;
// Set our resource limit.
resource_limit = reslimit;
resource_limit->Open();
// Mark initialized.
is_initialized = true;
// Clear all pages in the memory.
std::memset(device_memory_.GetPointer(physical_address_), 0, size_);
return RESULT_SUCCESS;
}
ResultCode KSharedMemory::Map(Process& target_process, VAddr address, std::size_t size,
KMemoryPermission permissions) {
void KSharedMemory::Finalize() {
// Release the memory reservation.
resource_limit->Release(LimitableResource::PhysicalMemory, size);
resource_limit->Close();
// Perform inherited finalization.
KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList>::Finalize();
}
ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t size,
Svc::MemoryPermission permissions) {
const u64 page_count{(size + PageSize - 1) / PageSize};
if (page_list.GetNumPages() != page_count) {
UNIMPLEMENTED_MSG("Page count does not match");
}
const KMemoryPermission expected =
const Svc::MemoryPermission expected =
&target_process == owner_process ? owner_permission : user_permission;
if (permissions != expected) {
@@ -59,7 +83,17 @@ ResultCode KSharedMemory::Map(Process& target_process, VAddr address, std::size_
}
return target_process.PageTable().MapPages(address, page_list, KMemoryState::Shared,
permissions);
ConvertToKMemoryPermission(permissions));
}
ResultCode KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t size) {
const u64 page_count{(size + PageSize - 1) / PageSize};
if (page_list.GetNumPages() != page_count) {
UNIMPLEMENTED_MSG("Page count does not match");
}
return target_process.PageTable().UnmapPages(address, page_list, KMemoryState::Shared);
}
} // namespace Kernel
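KScopedResourceReservation gives Initialize transactional behavior: the reservation is taken up front, checked with Succeeded(), and only kept if Commit() is called before the scope ends. A simplified stand-in showing that commit-or-roll-back idiom (ScopedReservation and the raw counters are illustrative, not the real resource-limit API):

class ScopedReservation {
public:
    ScopedReservation(long long& used_, long long limit, long long amount_)
        : used{used_}, amount{amount_} {
        if (used + amount <= limit) {
            used += amount;  // reserve immediately
            succeeded = true;
        }
    }
    ~ScopedReservation() {
        if (succeeded && !committed) {
            used -= amount;  // roll back if the caller bailed out before Commit()
        }
    }
    bool Succeeded() const { return succeeded; }
    void Commit() { committed = true; }

private:
    long long& used;
    long long amount;
    bool succeeded{};
    bool committed{};
};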

View File

@@ -11,37 +11,27 @@
#include "core/device_memory.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class KernelCore;
class KSharedMemory final : public Object {
class KSharedMemory final
: public KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);
public:
explicit KSharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory);
explicit KSharedMemory(KernelCore& kernel);
~KSharedMemory() override;
static std::shared_ptr<KSharedMemory> Create(
KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
KPageLinkedList&& page_list, KMemoryPermission owner_permission,
KMemoryPermission user_permission, PAddr physical_address, std::size_t size,
std::string name);
std::string GetTypeName() const override {
return "SharedMemory";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::SharedMemory;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
ResultCode Initialize(KernelCore& kernel_, Core::DeviceMemory& device_memory_,
KProcess* owner_process_, KPageLinkedList&& page_list_,
Svc::MemoryPermission owner_permission_,
Svc::MemoryPermission user_permission_, PAddr physical_address_,
std::size_t size_, std::string name_);
/**
* Maps a shared memory block to an address in the target process' address space
@@ -50,8 +40,16 @@ public:
* @param size Size of the shared memory block to map
* @param permissions Memory block map permissions (specified by SVC field)
*/
ResultCode Map(Process& target_process, VAddr address, std::size_t size,
KMemoryPermission permissions);
ResultCode Map(KProcess& target_process, VAddr address, std::size_t size,
Svc::MemoryPermission permissions);
/**
* Unmaps a shared memory block from an address in the target process' address space
* @param target_process Process on which to unmap the memory block
* @param address Address in system memory to unmap shared memory block
* @param size Size of the shared memory block to unmap
*/
ResultCode Unmap(KProcess& target_process, VAddr address, std::size_t size);
/**
* Gets a pointer to the shared memory block
@@ -59,7 +57,7 @@ public:
* @return A pointer to the shared memory block from the specified offset
*/
u8* GetPointer(std::size_t offset = 0) {
return device_memory.GetPointer(physical_address + offset);
return device_memory->GetPointer(physical_address + offset);
}
/**
@@ -68,20 +66,26 @@ public:
* @return A pointer to the shared memory block from the specified offset
*/
const u8* GetPointer(std::size_t offset = 0) const {
return device_memory.GetPointer(physical_address + offset);
return device_memory->GetPointer(physical_address + offset);
}
void Finalize() override {}
virtual void Finalize() override;
virtual bool IsInitialized() const override {
return is_initialized;
}
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
private:
Core::DeviceMemory& device_memory;
Process* owner_process{};
Core::DeviceMemory* device_memory;
KProcess* owner_process{};
KPageLinkedList page_list;
KMemoryPermission owner_permission{};
KMemoryPermission user_permission{};
Svc::MemoryPermission owner_permission{};
Svc::MemoryPermission user_permission{};
PAddr physical_address{};
std::size_t size{};
std::string name;
KResourceLimit* resource_limit{};
bool is_initialized{};
};
} // namespace Kernel

View File

@@ -97,6 +97,7 @@ public:
void FreeImpl(void* obj) {
// Don't allow freeing an object that wasn't allocated from this heap
ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
impl.Free(obj);
}
@@ -148,6 +149,14 @@ public:
return obj;
}
T* AllocateWithKernel(KernelCore& kernel) {
T* obj = static_cast<T*>(AllocateImpl());
if (obj != nullptr) {
new (obj) T(kernel);
}
return obj;
}
void Free(T* obj) {
FreeImpl(obj);
}
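AllocateWithKernel pairs the raw slab allocation with a placement-new that runs the KernelCore& constructor in place. A generic illustration of that idiom (ConstructAt is a hypothetical helper; storage is assumed to come from a pool such as the slab heap):

#include <new>

template <typename T, typename Arg>
T* ConstructAt(void* storage, Arg& arg) {
    if (storage == nullptr) {
        return nullptr;  // allocation failed upstream
    }
    return new (storage) T(arg);  // construct in place; no extra heap allocation
}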

View File

@@ -13,6 +13,11 @@
namespace Kernel {
void KSynchronizationObject::Finalize() {
this->OnFinalizeSynchronizationObject();
KAutoObject::Finalize();
}
ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
KSynchronizationObject** objects, const s32 num_objects,
s64 timeout) {
@@ -130,10 +135,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
return wait_result;
}
KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {}
KSynchronizationObject::KSynchronizationObject(KernelCore& kernel, std::string&& name)
: Object{kernel, std::move(name)} {}
KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : KAutoObjectWithList{kernel} {}
KSynchronizationObject::~KSynchronizationObject() = default;

View File

@@ -6,7 +6,7 @@
#include <vector>
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/result.h"
namespace Kernel {
@@ -16,7 +16,9 @@ class Synchronization;
class KThread;
/// Class that represents a Kernel object that a thread can be waiting on
class KSynchronizationObject : public Object {
class KSynchronizationObject : public KAutoObjectWithList {
KERNEL_AUTOOBJECT_TRAITS(KSynchronizationObject, KAutoObject);
public:
struct ThreadListNode {
ThreadListNode* next{};
@@ -27,15 +29,18 @@ public:
KSynchronizationObject** objects, const s32 num_objects,
s64 timeout);
virtual void Finalize() override;
[[nodiscard]] virtual bool IsSignaled() const = 0;
[[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
protected:
explicit KSynchronizationObject(KernelCore& kernel);
explicit KSynchronizationObject(KernelCore& kernel, std::string&& name);
virtual ~KSynchronizationObject();
virtual void OnFinalizeSynchronizationObject() {}
void NotifyAvailable(ResultCode result);
void NotifyAvailable() {
return this->NotifyAvailable(RESULT_SUCCESS);
@@ -46,14 +51,4 @@ private:
ThreadListNode* thread_list_tail{};
};
// Specialization of DynamicObjectCast for KSynchronizationObjects
template <>
inline std::shared_ptr<KSynchronizationObject> DynamicObjectCast<KSynchronizationObject>(
std::shared_ptr<Object> object) {
if (object != nullptr && object->IsWaitable()) {
return std::static_pointer_cast<KSynchronizationObject>(object);
}
return nullptr;
}
} // namespace Kernel

View File

@@ -18,17 +18,16 @@
#include "core/core.h"
#include "core/cpu_manager.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
@@ -62,11 +61,11 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
namespace Kernel {
KThread::KThread(KernelCore& kernel)
: KSynchronizationObject{kernel}, activity_pause_lock{kernel} {}
: KAutoObjectWithSlabHeapAndContainer{kernel}, activity_pause_lock{kernel} {}
KThread::~KThread() = default;
ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
s32 virt_core, Process* owner, ThreadType type) {
s32 virt_core, KProcess* owner, ThreadType type) {
// Assert parameters are valid.
ASSERT((type == ThreadType::Main) ||
(Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
@@ -177,6 +176,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
// Set parent, if relevant.
if (owner != nullptr) {
parent = owner;
parent->Open();
parent->IncrementThreadCount();
}
@@ -209,14 +209,56 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
}
ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
VAddr user_stack_top, s32 prio, s32 core, Process* owner,
ThreadType type) {
VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
ThreadType type, std::function<void(void*)>&& init_func,
void* init_func_parameter) {
// Initialize the thread.
R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
// Initialize host context.
thread->host_context =
std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
return RESULT_SUCCESS;
}
ResultCode KThread::InitializeDummyThread(KThread* thread) {
return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Main);
}
ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
Core::CpuManager::GetIdleThreadStartFunc(),
system.GetCpuManager().GetStartFuncParamater());
}
ResultCode KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
KThreadFunction func, uintptr_t arg,
s32 virt_core) {
return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
Core::CpuManager::GetSuspendThreadStartFunc(),
system.GetCpuManager().GetStartFuncParamater());
}
ResultCode KThread::InitializeUserThread(Core::System& system, KThread* thread,
KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
s32 prio, s32 virt_core, KProcess* owner) {
system.Kernel().GlobalSchedulerContext().AddThread(thread);
return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
ThreadType::User, Core::CpuManager::GetGuestThreadStartFunc(),
system.GetCpuManager().GetStartFuncParamater());
}
void KThread::PostDestroy(uintptr_t arg) {
KProcess* owner = reinterpret_cast<KProcess*>(arg & ~1ULL);
const bool resource_limit_release_hint = (arg & 1);
const s64 hint_value = (resource_limit_release_hint ? 0 : 1);
if (owner != nullptr) {
owner->GetResourceLimit()->Release(LimitableResource::Threads, 1, hint_value);
owner->Close();
}
}
void KThread::Finalize() {
// If the thread has an owner process, unregister it.
if (parent != nullptr) {
@@ -246,8 +288,10 @@ void KThread::Finalize() {
// Decrement the parent process's thread count.
if (parent != nullptr) {
parent->DecrementThreadCount();
parent->GetResourceLimit()->Release(LimitableResource::Threads, 1);
}
// Perform inherited finalization.
KAutoObjectWithSlabHeapAndContainer<KThread, KSynchronizationObject>::Finalize();
}
bool KThread::IsSignaled() const {
@@ -294,6 +338,9 @@ void KThread::StartTermination() {
// Register terminated dpc flag.
RegisterDpc(DpcFlag::Terminated);
// Close the thread.
this->Close();
}
void KThread::Pin() {
@@ -932,7 +979,7 @@ void KThread::Exit() {
// Release the thread resource hint from parent.
if (parent != nullptr) {
// TODO(bunnei): Hint that the resource is about to be released.
parent->GetResourceLimit()->Release(Kernel::LimitableResource::Threads, 0, 1);
resource_limit_release_hint = true;
}
@@ -995,56 +1042,6 @@ std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
return host_context;
}
ResultVal<std::shared_ptr<KThread>> KThread::CreateThread(Core::System& system,
ThreadType type_flags, std::string name,
VAddr entry_point, u32 priority, u64 arg,
s32 processor_id, VAddr stack_top,
Process* owner_process) {
auto& kernel = system.Kernel();
std::shared_ptr<KThread> thread = std::make_shared<KThread>(kernel);
if (const auto result =
thread->InitializeThread(thread.get(), entry_point, arg, stack_top, priority,
processor_id, owner_process, type_flags);
result.IsError()) {
return result;
}
thread->name = name;
auto& scheduler = kernel.GlobalSchedulerContext();
scheduler.AddThread(thread);
return MakeResult<std::shared_ptr<KThread>>(std::move(thread));
}
ResultVal<std::shared_ptr<KThread>> KThread::CreateThread(
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, u32 priority,
u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
std::function<void(void*)>&& thread_start_func, void* thread_start_parameter) {
auto thread_result = CreateThread(system, type_flags, name, entry_point, priority, arg,
processor_id, stack_top, owner_process);
if (thread_result.Succeeded()) {
(*thread_result)->host_context =
std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
}
return thread_result;
}
ResultVal<std::shared_ptr<KThread>> KThread::CreateUserThread(
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, u32 priority,
u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process) {
std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
return CreateThread(system, type_flags, name, entry_point, priority, arg, processor_id,
stack_top, owner_process, std::move(init_func), init_func_parameter);
}
KThread* GetCurrentThreadPointer(KernelCore& kernel) {
return kernel.GetCurrentEmuThread();
}
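KThread::PostDestroy packs two values into its single uintptr_t argument: the owner process pointer in the upper bits and a one-bit resource-release hint in the low bit, which is free because the pointer is aligned. A self-contained illustration of that packing (Owner, Pack, and the Unpack helpers are hypothetical):

#include <cstdint>

struct alignas(2) Owner {};  // alignment guarantees the low bit of the pointer is zero

inline std::uintptr_t Pack(Owner* owner, bool release_hint) {
    return reinterpret_cast<std::uintptr_t>(owner) |
           static_cast<std::uintptr_t>(release_hint);
}

inline Owner* UnpackOwner(std::uintptr_t arg) {
    return reinterpret_cast<Owner*>(arg & ~static_cast<std::uintptr_t>(1));
}

inline bool UnpackReleaseHint(std::uintptr_t arg) {
    return (arg & 1) != 0;
}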

View File

@@ -19,7 +19,7 @@
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
@@ -37,7 +37,7 @@ namespace Kernel {
class GlobalSchedulerContext;
class KernelCore;
class Process;
class KProcess;
class KScheduler;
class KThreadQueue;
@@ -99,9 +99,13 @@ enum class ThreadWaitReasonForDebugging : u32 {
[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
class KThread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KSynchronizationObject>,
public boost::intrusive::list_base_hook<> {
KERNEL_AUTOOBJECT_TRAITS(KThread, KSynchronizationObject);
private:
friend class KScheduler;
friend class Process;
friend class KProcess;
public:
static constexpr s32 DefaultThreadPriority = 44;
@@ -115,74 +119,10 @@ public:
using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
using WaiterList = boost::intrusive::list<KThread>;
/**
* Creates and returns a new thread.
* @param system The instance of the whole system
* @param name The friendly name desired for the thread
* @param entry_point The address at which the thread should start execution
* @param priority The thread's priority
* @param arg User data to pass to the thread
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
* @param stack_top The address of the thread's stack top
* @param owner_process The parent process for the thread, if null, it's a kernel thread
* @return A shared pointer to the newly created thread
*/
[[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateThread(
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);
/**
* Creates and returns a new thread, with a specified entry point.
* @param system The instance of the whole system
* @param name The friendly name desired for the thread
* @param entry_point The address at which the thread should start execution
* @param priority The thread's priority
* @param arg User data to pass to the thread
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
* @param stack_top The address of the thread's stack top
* @param owner_process The parent process for the thread, if null, it's a kernel thread
* @param thread_start_func The function where the host context will start.
* @param thread_start_parameter The parameter which will passed to host context on init
* @return A shared pointer to the newly created thread
*/
[[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateThread(
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
std::function<void(void*)>&& thread_start_func, void* thread_start_parameter);
/**
* Creates and returns a new thread for the emulated "user" process.
* @param system The instance of the whole system
* @param name The friendly name desired for the thread
* @param entry_point The address at which the thread should start execution
* @param priority The thread's priority
* @param arg User data to pass to the thread
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
* @param stack_top The address of the thread's stack top
* @param owner_process The parent process for the thread, if null, it's a kernel thread
* @return A shared pointer to the newly created thread
*/
[[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateUserThread(
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);
[[nodiscard]] std::string GetName() const override {
return name;
}
void SetName(std::string new_name) {
name = std::move(new_name);
}
[[nodiscard]] std::string GetTypeName() const override {
return "Thread";
}
static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
[[nodiscard]] HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
/**
* Gets the thread's current priority
* @return The current thread's priority
@@ -257,10 +197,6 @@ public:
void Suspend();
void Finalize() override;
bool IsSignaled() const override;
void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) {
synced_object = obj;
wait_result = wait_res;
@@ -354,11 +290,11 @@ public:
current_core_id = core;
}
[[nodiscard]] Process* GetOwnerProcess() {
[[nodiscard]] KProcess* GetOwnerProcess() {
return parent;
}
[[nodiscard]] const Process* GetOwnerProcess() const {
[[nodiscard]] const KProcess* GetOwnerProcess() const {
return parent;
}
@@ -422,6 +358,40 @@ public:
return termination_requested || GetRawState() == ThreadState::Terminated;
}
[[nodiscard]] virtual u64 GetId() const override final {
return this->GetThreadID();
}
[[nodiscard]] virtual bool IsInitialized() const override {
return initialized;
}
[[nodiscard]] virtual uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(parent) | (resource_limit_release_hint ? 1 : 0);
}
virtual void Finalize() override;
[[nodiscard]] virtual bool IsSignaled() const override;
static void PostDestroy(uintptr_t arg);
[[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);
[[nodiscard]] static ResultCode InitializeIdleThread(Core::System& system, KThread* thread,
s32 virt_core);
[[nodiscard]] static ResultCode InitializeHighPriorityThread(Core::System& system,
KThread* thread,
KThreadFunction func,
uintptr_t arg, s32 virt_core);
[[nodiscard]] static ResultCode InitializeUserThread(Core::System& system, KThread* thread,
KThreadFunction func, uintptr_t arg,
VAddr user_stack_top, s32 prio,
s32 virt_core, KProcess* owner);
public:
struct StackParameters {
u8 svc_permission[0x10];
std::atomic<u8> dpc_flags;
@@ -671,11 +641,13 @@ private:
void StartTermination();
[[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
s32 prio, s32 virt_core, Process* owner, ThreadType type);
s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
[[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func,
uintptr_t arg, VAddr user_stack_top, s32 prio,
s32 core, Process* owner, ThreadType type);
s32 core, KProcess* owner, ThreadType type,
std::function<void(void*)>&& init_func,
void* init_func_parameter);
static void RestorePriority(KernelCore& kernel, KThread* thread);
@@ -697,7 +669,7 @@ private:
std::atomic<s64> cpu_time{};
KSynchronizationObject* synced_object{};
VAddr address_key{};
Process* parent{};
KProcess* parent{};
VAddr kernel_stack_top{};
u32* light_ipc_data{};
VAddr tls_address{};
@@ -742,7 +714,6 @@ private:
VAddr mutex_wait_address_for_debugging{};
ThreadWaitReasonForDebugging wait_reason_for_debugging{};
ThreadType thread_type_for_debugging{};
std::string name;
public:
using ConditionVariableThreadTreeType = ConditionVariableThreadTree;

View File

@@ -8,20 +8,28 @@
namespace Kernel {
KWritableEvent::KWritableEvent(KernelCore& kernel, std::string&& name)
: Object{kernel, std::move(name)} {}
KWritableEvent::KWritableEvent(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
KWritableEvent::~KWritableEvent() = default;
void KWritableEvent::Initialize(KEvent* parent_) {
void KWritableEvent::Initialize(KEvent* parent_, std::string&& name_) {
parent = parent_;
name = std::move(name_);
parent->GetReadableEvent().Open();
}
ResultCode KWritableEvent::Signal() {
return parent->GetReadableEvent()->Signal();
return parent->GetReadableEvent().Signal();
}
ResultCode KWritableEvent::Clear() {
return parent->GetReadableEvent()->Clear();
return parent->GetReadableEvent().Clear();
}
void KWritableEvent::Destroy() {
// Close our references.
parent->GetReadableEvent().Close();
parent->Close();
}
} // namespace Kernel
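After this change the event halves are plain reference-counted KAutoObjects: Initialize opens a reference on the parent's readable event, and Destroy (run when the last reference drops) closes both that reference and the parent. A hedged usage sketch; the accessor that yields the writable half is assumed, since it is not part of this hunk:

// writable is a KWritableEvent* obtained from its parent KEvent (accessor name assumed).
writable->Signal();   // forwards to parent->GetReadableEvent().Signal()
writable->Clear();    // forwards to parent->GetReadableEvent().Clear()
writable->Close();    // if this was the last reference, Destroy() releases the readable
                      // event's reference and then the parent KEvent itself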

View File

@@ -4,7 +4,8 @@
#pragma once
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
@@ -12,24 +13,19 @@ namespace Kernel {
class KernelCore;
class KEvent;
class KWritableEvent final : public Object {
class KWritableEvent final
: public KAutoObjectWithSlabHeapAndContainer<KWritableEvent, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KWritableEvent, KAutoObject);
public:
explicit KWritableEvent(KernelCore& kernel, std::string&& name);
explicit KWritableEvent(KernelCore& kernel);
~KWritableEvent() override;
std::string GetTypeName() const override {
return "KWritableEvent";
}
virtual void Destroy() override;
static constexpr HandleType HANDLE_TYPE = HandleType::WritableEvent;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
void Initialize(KEvent* parent_);
void Finalize() override {}
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
void Initialize(KEvent* parent_, std::string&& name_);
ResultCode Signal();
ResultCode Clear();

View File

@@ -26,10 +26,12 @@
#include "core/cpu_manager.h"
#include "core/device_memory.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_shared_memory.h"
@@ -37,7 +39,6 @@
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/service_thread.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h"
@@ -51,7 +52,7 @@ namespace Kernel {
struct KernelCore::Impl {
explicit Impl(Core::System& system, KernelCore& kernel)
: time_manager{system}, global_handle_table{kernel}, system{system} {}
: time_manager{system}, object_list_container{kernel}, system{system} {}
void SetMulticore(bool is_multicore) {
this->is_multicore = is_multicore;
@@ -59,8 +60,7 @@ struct KernelCore::Impl {
void Initialize(KernelCore& kernel) {
global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
RegisterHostThread();
global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
service_thread_manager =
std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager");
@@ -69,14 +69,20 @@ struct KernelCore::Impl {
InitializePhysicalCores();
// Derive the initial memory layout from the emulated board
Init::InitializeSlabResourceCounts(kernel);
KMemoryLayout memory_layout;
DeriveInitialMemoryLayout(memory_layout);
InitializeMemoryLayout(memory_layout);
Init::InitializeSlabHeaps(system, memory_layout);
// Initialize kernel memory and resources.
InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout);
InitializeSlabHeaps();
InitializeMemoryLayout(memory_layout);
InitializePageSlab();
InitializeSchedulers();
InitializeSuspendThreads();
InitializePreemption(kernel);
RegisterHostThread();
}
void InitializeCores() {
@@ -93,34 +99,49 @@ struct KernelCore::Impl {
service_threads.clear();
next_object_id = 0;
next_kernel_process_id = Process::InitialKIPIDMin;
next_user_process_id = Process::ProcessIDMin;
next_kernel_process_id = KProcess::InitialKIPIDMin;
next_user_process_id = KProcess::ProcessIDMin;
next_thread_id = 1;
for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
if (suspend_threads[i]) {
suspend_threads[i].reset();
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
if (suspend_threads[core_id]) {
suspend_threads[core_id]->Close();
suspend_threads[core_id] = nullptr;
}
schedulers[core_id].reset();
}
cores.clear();
current_process = nullptr;
if (current_process) {
current_process->Close();
current_process = nullptr;
}
global_handle_table.Clear();
global_handle_table.reset();
preemption_event = nullptr;
for (auto& iter : named_ports) {
iter.second->Close();
}
named_ports.clear();
exclusive_monitor.reset();
hid_shared_mem = nullptr;
font_shared_mem = nullptr;
irs_shared_mem = nullptr;
time_shared_mem = nullptr;
system_resource_limit = nullptr;
// Cleanup persistent kernel objects
auto CleanupObject = [](KAutoObject* obj) {
if (obj) {
obj->Close();
obj = nullptr;
}
};
CleanupObject(hid_shared_mem);
CleanupObject(font_shared_mem);
CleanupObject(irs_shared_mem);
CleanupObject(time_shared_mem);
CleanupObject(system_resource_limit);
// Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
@@ -145,7 +166,9 @@ struct KernelCore::Impl {
void InitializeSystemResourceLimit(KernelCore& kernel,
const Core::Timing::CoreTiming& core_timing,
const KMemoryLayout& memory_layout) {
system_resource_limit = std::make_shared<KResourceLimit>(kernel, core_timing);
system_resource_limit = KResourceLimit::Create(system.Kernel());
system_resource_limit->Initialize(&core_timing);
const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes();
// If setting the default system values fails, then something seriously wrong has occurred.
@@ -189,19 +212,16 @@ struct KernelCore::Impl {
}
void InitializeSuspendThreads() {
for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
std::string name = "Suspend Thread Id:" + std::to_string(i);
std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc();
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
auto thread_res = KThread::CreateThread(
system, ThreadType::HighPriority, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
nullptr, std::move(init_func), init_func_parameter);
suspend_threads[i] = std::move(thread_res).Unwrap();
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
suspend_threads[core_id] = KThread::Create(system.Kernel());
ASSERT(KThread::InitializeHighPriorityThread(system, suspend_threads[core_id], {}, {},
core_id)
.IsSuccess());
suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
}
}
void MakeCurrentProcess(Process* process) {
void MakeCurrentProcess(KProcess* process) {
current_process = process;
if (process == nullptr) {
return;
@@ -232,11 +252,15 @@ struct KernelCore::Impl {
// Gets the dummy KThread for the caller, allocating a new one if this is the first time
KThread* GetHostDummyThread() {
const thread_local auto thread =
KThread::CreateThread(
system, ThreadType::Main, fmt::format("DummyThread:{}", GetHostThreadId()), 0,
KThread::DefaultThreadPriority, 0, static_cast<u32>(3), 0, nullptr)
.Unwrap();
auto make_thread = [this]() {
std::unique_ptr<KThread> thread = std::make_unique<KThread>(system.Kernel());
KAutoObject::Create(thread.get());
ASSERT(KThread::InitializeDummyThread(thread.get()).IsSuccess());
thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
return std::move(thread);
};
thread_local auto thread = make_thread();
return thread.get();
}
@@ -371,7 +395,8 @@ struct KernelCore::Impl {
const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit();
// Determine the size of the slab region.
const size_t slab_region_size = Common::AlignUp(KernelSlabHeapSize, PageSize);
const size_t slab_region_size =
Common::AlignUp(Init::CalculateTotalSlabHeapSize(system.Kernel()), PageSize);
ASSERT(slab_region_size <= resource_region_size);
// Setup the slab region.
@@ -569,25 +594,30 @@ struct KernelCore::Impl {
const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size};
const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size};
hid_shared_mem = Kernel::KSharedMemory::Create(
system.Kernel(), system.DeviceMemory(), nullptr, {hid_phys_addr, hid_size / PageSize},
KMemoryPermission::None, KMemoryPermission::Read, hid_phys_addr, hid_size,
"HID:SharedMemory");
font_shared_mem = Kernel::KSharedMemory::Create(
system.Kernel(), system.DeviceMemory(), nullptr, {font_phys_addr, font_size / PageSize},
KMemoryPermission::None, KMemoryPermission::Read, font_phys_addr, font_size,
"Font:SharedMemory");
irs_shared_mem = Kernel::KSharedMemory::Create(
system.Kernel(), system.DeviceMemory(), nullptr, {irs_phys_addr, irs_size / PageSize},
KMemoryPermission::None, KMemoryPermission::Read, irs_phys_addr, irs_size,
"IRS:SharedMemory");
time_shared_mem = Kernel::KSharedMemory::Create(
system.Kernel(), system.DeviceMemory(), nullptr, {time_phys_addr, time_size / PageSize},
KMemoryPermission::None, KMemoryPermission::Read, time_phys_addr, time_size,
"Time:SharedMemory");
hid_shared_mem = KSharedMemory::Create(system.Kernel());
font_shared_mem = KSharedMemory::Create(system.Kernel());
irs_shared_mem = KSharedMemory::Create(system.Kernel());
time_shared_mem = KSharedMemory::Create(system.Kernel());
hid_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr,
{hid_phys_addr, hid_size / PageSize},
Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
hid_phys_addr, hid_size, "HID:SharedMemory");
font_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr,
{font_phys_addr, font_size / PageSize},
Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
font_phys_addr, font_size, "Font:SharedMemory");
irs_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr,
{irs_phys_addr, irs_size / PageSize},
Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
irs_phys_addr, irs_size, "IRS:SharedMemory");
time_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr,
{time_phys_addr, time_size / PageSize},
Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
time_phys_addr, time_size, "Time:SharedMemory");
}
void InitializeSlabHeaps() {
void InitializePageSlab() {
// Allocate slab heaps
user_slab_heap_pages = std::make_unique<KSlabHeap<Page>>();
@@ -596,30 +626,33 @@ struct KernelCore::Impl {
// Reserve slab heaps
ASSERT(
system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size));
// Initialize slab heaps
// Initialize slab heap
user_slab_heap_pages->Initialize(
system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
user_slab_heap_size);
}
std::atomic<u32> next_object_id{0};
std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
std::atomic<u64> next_user_process_id{KProcess::ProcessIDMin};
std::atomic<u64> next_thread_id{1};
// Lists all processes that exist in the current session.
std::vector<std::shared_ptr<Process>> process_list;
Process* current_process = nullptr;
std::vector<KProcess*> process_list;
KProcess* current_process{};
std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
Kernel::TimeManager time_manager;
std::shared_ptr<KResourceLimit> system_resource_limit;
Init::KSlabResourceCounts slab_resource_counts{};
KResourceLimit* system_resource_limit{};
std::shared_ptr<Core::Timing::EventType> preemption_event;
// This is the kernel's handle table or supervisor handle table which
// stores all the objects in place.
HandleTable global_handle_table;
std::unique_ptr<KHandleTable> global_handle_table;
KAutoObjectWithListContainer object_list_container;
/// Map of named ports managed by the kernel, which can be retrieved using
/// the ConnectToPort SVC.
@@ -636,10 +669,10 @@ struct KernelCore::Impl {
std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages;
// Shared memory for services
std::shared_ptr<Kernel::KSharedMemory> hid_shared_mem;
std::shared_ptr<Kernel::KSharedMemory> font_shared_mem;
std::shared_ptr<Kernel::KSharedMemory> irs_shared_mem;
std::shared_ptr<Kernel::KSharedMemory> time_shared_mem;
Kernel::KSharedMemory* hid_shared_mem{};
Kernel::KSharedMemory* font_shared_mem{};
Kernel::KSharedMemory* irs_shared_mem{};
Kernel::KSharedMemory* time_shared_mem{};
// Threads used for services
std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
@@ -648,7 +681,7 @@ struct KernelCore::Impl {
// the release of itself
std::unique_ptr<Common::ThreadWorker> service_thread_manager;
std::array<std::shared_ptr<KThread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads;
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
@@ -663,15 +696,14 @@ struct KernelCore::Impl {
};
KernelCore::KernelCore(Core::System& system) : impl{std::make_unique<Impl>(system, *this)} {}
KernelCore::~KernelCore() {
Shutdown();
}
KernelCore::~KernelCore() = default;
void KernelCore::SetMulticore(bool is_multicore) {
impl->SetMulticore(is_multicore);
}
void KernelCore::Initialize() {
slab_heap_container = std::make_unique<SlabHeapContainer>();
impl->Initialize(*this);
}
@@ -683,31 +715,35 @@ void KernelCore::Shutdown() {
impl->Shutdown();
}
std::shared_ptr<KResourceLimit> KernelCore::GetSystemResourceLimit() const {
const KResourceLimit* KernelCore::GetSystemResourceLimit() const {
return impl->system_resource_limit;
}
std::shared_ptr<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
return impl->global_handle_table.Get<KThread>(handle);
KResourceLimit* KernelCore::GetSystemResourceLimit() {
return impl->system_resource_limit;
}
void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) {
impl->process_list.push_back(std::move(process));
KScopedAutoObject<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
return impl->global_handle_table->GetObject<KThread>(handle);
}
void KernelCore::MakeCurrentProcess(Process* process) {
void KernelCore::AppendNewProcess(KProcess* process) {
impl->process_list.push_back(process);
}
void KernelCore::MakeCurrentProcess(KProcess* process) {
impl->MakeCurrentProcess(process);
}
Process* KernelCore::CurrentProcess() {
KProcess* KernelCore::CurrentProcess() {
return impl->current_process;
}
const Process* KernelCore::CurrentProcess() const {
const KProcess* KernelCore::CurrentProcess() const {
return impl->current_process;
}
const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const {
const std::vector<KProcess*>& KernelCore::GetProcessList() const {
return impl->process_list;
}
@@ -781,6 +817,14 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
return *impl->exclusive_monitor;
}
KAutoObjectWithListContainer& KernelCore::ObjectListContainer() {
return impl->object_list_container;
}
const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const {
return impl->object_list_container;
}
void KernelCore::InvalidateAllInstructionCaches() {
for (auto& physical_core : impl->cores) {
physical_core.ArmInterface().ClearInstructionCache();
@@ -800,8 +844,9 @@ void KernelCore::PrepareReschedule(std::size_t id) {
// TODO: Reimplement, this
}
void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) {
impl->named_ports.emplace(std::move(name), std::move(port));
void KernelCore::AddNamedPort(std::string name, KClientPort* port) {
port->Open();
impl->named_ports.emplace(std::move(name), port);
}
KernelCore::NamedPortTable::iterator KernelCore::FindNamedPort(const std::string& name) {
@@ -833,12 +878,12 @@ u64 KernelCore::CreateNewUserProcessID() {
return impl->next_user_process_id++;
}
Kernel::HandleTable& KernelCore::GlobalHandleTable() {
return impl->global_handle_table;
KHandleTable& KernelCore::GlobalHandleTable() {
return *impl->global_handle_table;
}
const Kernel::HandleTable& KernelCore::GlobalHandleTable() const {
return impl->global_handle_table;
const KHandleTable& KernelCore::GlobalHandleTable() const {
return *impl->global_handle_table;
}
void KernelCore::RegisterCoreThread(std::size_t core_id) {
@@ -910,9 +955,9 @@ void KernelCore::Suspend(bool in_suspention) {
{
KScopedSchedulerLock lock(*this);
const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
impl->suspend_threads[i]->SetState(state);
impl->suspend_threads[i]->SetWaitReasonForDebugging(
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
impl->suspend_threads[core_id]->SetState(state);
impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
ThreadWaitReasonForDebugging::Suspended);
}
}
@@ -952,6 +997,14 @@ void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> servi
});
}
Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() {
return impl->slab_resource_counts;
}
const Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() const {
return impl->slab_resource_counts;
}
bool KernelCore::IsPhantomModeForSingleCore() const {
return impl->IsPhantomModeForSingleCore();
}
@@ -960,4 +1013,12 @@ void KernelCore::SetIsPhantomModeForSingleCore(bool value) {
impl->SetIsPhantomModeForSingleCore(value);
}
Core::System& KernelCore::System() {
return impl->system;
}
const Core::System& KernelCore::System() const {
return impl->system;
}
} // namespace Kernel
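The kernel.cpp changes above all follow the KAutoObject lifecycle this commit migrates to: objects are slab-allocated via a static Create, set up with an explicit Initialize call, kept alive with Open, and released with Close. A condensed sketch of that lifecycle using KSharedMemory as in InitializeMemoryLayout above; the zero-sized mapping and placeholder arguments are illustrative only:

// 1. Allocate from the slab heap; the creator holds the initial reference.
KSharedMemory* mem = KSharedMemory::Create(system.Kernel());
// 2. Two-phase initialization replaces the old constructor arguments.
mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr,
                {/*phys_addr=*/0, /*num_pages=*/0},
                Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
                /*phys_addr=*/0, /*size=*/0, "Example:SharedMemory");
// 3. Additional owners take and drop references explicitly.
mem->Open();
mem->Close();
// 4. The final Close() triggers Destroy(), returning the object to its slab.
mem->Close();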

View File

@@ -11,8 +11,10 @@
#include <vector>
#include "core/arm/cpu_interrupt_handler.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/svc_common.h"
namespace Core {
class CPUInterruptHandler;
@@ -27,20 +29,32 @@ struct EventType;
namespace Kernel {
class ClientPort;
class KClientPort;
class GlobalSchedulerContext;
class HandleTable;
class KAutoObjectWithListContainer;
class KClientSession;
class KEvent;
class KHandleTable;
class KLinkedListNode;
class KMemoryManager;
class KPort;
class KProcess;
class KResourceLimit;
class KScheduler;
class KSession;
class KSharedMemory;
class KThread;
class KTransferMemory;
class KWritableEvent;
class PhysicalCore;
class Process;
class ServiceThread;
class Synchronization;
class TimeManager;
namespace Init {
struct KSlabResourceCounts;
}
template <typename T>
class KSlabHeap;
@@ -51,7 +65,7 @@ constexpr EmuThreadHandle EmuThreadHandleReserved{1ULL << 63};
/// Represents a single instance of the kernel.
class KernelCore {
private:
using NamedPortTable = std::unordered_map<std::string, std::shared_ptr<ClientPort>>;
using NamedPortTable = std::unordered_map<std::string, KClientPort*>;
public:
/// Constructs an instance of the kernel using the given System
@@ -83,25 +97,28 @@ public:
void Shutdown();
/// Retrieves a pointer to the system resource limit instance.
std::shared_ptr<KResourceLimit> GetSystemResourceLimit() const;
const KResourceLimit* GetSystemResourceLimit() const;
/// Retrieves a pointer to the system resource limit instance.
KResourceLimit* GetSystemResourceLimit();
/// Retrieves a scoped reference to a thread within the global handle table.
std::shared_ptr<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
KScopedAutoObject<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
/// Adds the given process to the internal list of active processes.
void AppendNewProcess(std::shared_ptr<Process> process);
void AppendNewProcess(KProcess* process);
/// Makes the given process the new current process.
void MakeCurrentProcess(Process* process);
void MakeCurrentProcess(KProcess* process);
/// Retrieves a pointer to the current process.
Process* CurrentProcess();
KProcess* CurrentProcess();
/// Retrieves a const pointer to the current process.
const Process* CurrentProcess() const;
const KProcess* CurrentProcess() const;
/// Retrieves the list of processes.
const std::vector<std::shared_ptr<Process>>& GetProcessList() const;
const std::vector<KProcess*>& GetProcessList() const;
/// Gets the sole instance of the global scheduler
Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
@@ -143,6 +160,10 @@ public:
const Core::ExclusiveMonitor& GetExclusiveMonitor() const;
KAutoObjectWithListContainer& ObjectListContainer();
const KAutoObjectWithListContainer& ObjectListContainer() const;
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts();
const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const;
@@ -152,7 +173,7 @@ public:
void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
/// Adds a port to the named port table
void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port);
void AddNamedPort(std::string name, KClientPort* port);
/// Finds a port within the named port table with the given name.
NamedPortTable::iterator FindNamedPort(const std::string& name);
@@ -225,9 +246,10 @@ public:
/**
* Creates an HLE service thread, which are used to execute service routines asynchronously.
* While these are allocated per ServerSession, these need to be owned and managed outside of
* ServerSession to avoid a circular dependency.
* @param name String name for the ServerSession creating this thread, used for debug purposes.
* While these are allocated per ServerSession, these need to be owned and managed outside
* of ServerSession to avoid a circular dependency.
* @param name String name for the ServerSession creating this thread, used for debug
* purposes.
* @returns A weak pointer to the newly created service thread.
*/
std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name);
@@ -243,9 +265,45 @@ public:
bool IsPhantomModeForSingleCore() const;
void SetIsPhantomModeForSingleCore(bool value);
Core::System& System();
const Core::System& System() const;
/// Gets the slab heap for the specified kernel object type.
template <typename T>
KSlabHeap<T>& SlabHeap() {
if constexpr (std::is_same_v<T, KClientSession>) {
return slab_heap_container->client_session;
} else if constexpr (std::is_same_v<T, KEvent>) {
return slab_heap_container->event;
} else if constexpr (std::is_same_v<T, KLinkedListNode>) {
return slab_heap_container->linked_list_node;
} else if constexpr (std::is_same_v<T, KPort>) {
return slab_heap_container->port;
} else if constexpr (std::is_same_v<T, KProcess>) {
return slab_heap_container->process;
} else if constexpr (std::is_same_v<T, KResourceLimit>) {
return slab_heap_container->resource_limit;
} else if constexpr (std::is_same_v<T, KSession>) {
return slab_heap_container->session;
} else if constexpr (std::is_same_v<T, KSharedMemory>) {
return slab_heap_container->shared_memory;
} else if constexpr (std::is_same_v<T, KThread>) {
return slab_heap_container->thread;
} else if constexpr (std::is_same_v<T, KTransferMemory>) {
return slab_heap_container->transfer_memory;
} else if constexpr (std::is_same_v<T, KWritableEvent>) {
return slab_heap_container->writeable_event;
}
}
/// Gets the current slab resource counts.
Init::KSlabResourceCounts& SlabResourceCounts();
/// Gets the current slab resource counts.
const Init::KSlabResourceCounts& SlabResourceCounts() const;
private:
friend class Object;
friend class Process;
friend class KProcess;
friend class KThread;
/// Creates a new object ID, incrementing the internal object ID counter.
@@ -261,14 +319,33 @@ private:
u64 CreateNewThreadID();
/// Provides a reference to the global handle table.
Kernel::HandleTable& GlobalHandleTable();
KHandleTable& GlobalHandleTable();
/// Provides a const reference to the global handle table.
const Kernel::HandleTable& GlobalHandleTable() const;
const KHandleTable& GlobalHandleTable() const;
struct Impl;
std::unique_ptr<Impl> impl;
bool exception_exited{};
private:
/// Helper to encapsulate all slab heaps in a single heap allocated container
struct SlabHeapContainer {
KSlabHeap<KClientSession> client_session;
KSlabHeap<KEvent> event;
KSlabHeap<KLinkedListNode> linked_list_node;
KSlabHeap<KPort> port;
KSlabHeap<KProcess> process;
KSlabHeap<KResourceLimit> resource_limit;
KSlabHeap<KSession> session;
KSlabHeap<KSharedMemory> shared_memory;
KSlabHeap<KThread> thread;
KSlabHeap<KTransferMemory> transfer_memory;
KSlabHeap<KWritableEvent> writeable_event;
};
std::unique_ptr<SlabHeapContainer> slab_heap_container;
};
} // namespace Kernel
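The SlabHeap<T>() accessor above is what the per-type Create helpers are built on. A hedged sketch of direct use; real call sites should go through the KAutoObjectWithSlabHeapAndContainer helpers such as KThread::Create, and the Allocate/Free member names are assumed from KSlabHeap's interface rather than shown in this diff:

// Grab a raw KThread slot from its slab heap and return it when finished.
KThread* slot = kernel.SlabHeap<KThread>().Allocate();
if (slot != nullptr) {
    // ... placement-construct / Initialize / use the object ...
    kernel.SlabHeap<KThread>().Free(slot);
}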

View File

@@ -6,7 +6,7 @@
#include "common/bit_util.h"
#include "common/logging/log.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/svc_results.h"
@@ -99,7 +99,7 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() {
interrupt_capabilities.set();
// Allow using the maximum possible amount of handles
handle_table_size = static_cast<s32>(HandleTable::MAX_COUNT);
handle_table_size = static_cast<s32>(KHandleTable::MaxTableSize);
// Allow all debugging capabilities.
is_debuggable = true;
@@ -159,7 +159,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
const auto type = GetCapabilityType(flag);
if (type == CapabilityType::Unset) {
return ResultInvalidCapabilityDescriptor;
return ResultInvalidArgument;
}
// Bail early on ignorable entries, as one would expect,
@@ -202,7 +202,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
}
LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
return ResultInvalidCapabilityDescriptor;
return ResultInvalidArgument;
}
void ProcessCapabilities::Clear() {
@@ -225,7 +225,7 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
if (priority_mask != 0 || core_mask != 0) {
LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
priority_mask, core_mask);
return ResultInvalidCapabilityDescriptor;
return ResultInvalidArgument;
}
const u32 core_num_min = (flags >> 16) & 0xFF;
@@ -329,7 +329,7 @@ ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
const u32 reserved = flags >> 17;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
return ResultReservedValue;
return ResultReservedUsed;
}
program_type = static_cast<ProgramType>((flags >> 14) & 0b111);
@@ -349,7 +349,7 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
LOG_ERROR(Kernel,
"Kernel version is non zero or flags are too small! major_version={}, flags={}",
major_version, flags);
return ResultInvalidCapabilityDescriptor;
return ResultInvalidArgument;
}
kernel_version = flags;
@@ -360,7 +360,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
const u32 reserved = flags >> 26;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
return ResultReservedValue;
return ResultReservedUsed;
}
handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF);
@@ -371,7 +371,7 @@ ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) {
const u32 reserved = flags >> 19;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
return ResultReservedValue;
return ResultReservedUsed;
}
is_debuggable = (flags & 0x20000) != 0;
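The capability descriptors above keep their bit layouts; only the returned error codes change. For the handle-table descriptor handled by HandleHandleTableFlags, the decode spelled out with illustrative variable names (flags is the 32-bit descriptor passed in):

// Bit layout consumed by HandleHandleTableFlags (unchanged by this commit):
//   bits 16..25  requested handle table size (10 bits, up to 1023 handles)
//   bits 26..31  reserved; non-zero now fails with ResultReservedUsed
// e.g. a descriptor carrying (512 << 16) in these fields decodes to a size of 512.
const u32 reserved_bits = flags >> 26;
const s32 requested_handles = static_cast<s32>((flags >> 16) & 0x3FF);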

View File

@@ -13,8 +13,8 @@
#include "common/scope_exit.h"
#include "common/thread.h"
#include "core/core.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/service_thread.h"
#include "core/hle/lock.h"
#include "video_core/renderer_base.h"
@@ -26,7 +26,7 @@ public:
explicit Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name);
~Impl();
void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context);
void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);
private:
std::vector<std::thread> threads;
@@ -69,18 +69,27 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
});
}
void ServiceThread::Impl::QueueSyncRequest(ServerSession& session,
void ServiceThread::Impl::QueueSyncRequest(KSession& session,
std::shared_ptr<HLERequestContext>&& context) {
{
std::unique_lock lock{queue_mutex};
// ServerSession owns the service thread, so we cannot capture a strong pointer here in the
// event that the ServerSession is terminated.
std::weak_ptr<ServerSession> weak_ptr{SharedFrom(&session)};
requests.emplace([weak_ptr, context{std::move(context)}]() {
if (auto strong_ptr = weak_ptr.lock()) {
strong_ptr->CompleteSyncRequest(*context);
// Open a reference to the session to ensure it is not closed while the service request
// completes asynchronously.
session.Open();
requests.emplace([session_ptr{&session}, context{std::move(context)}]() {
// Close the reference.
SCOPE_EXIT({ session_ptr->Close(); });
// If the session has been closed, we are done.
if (session_ptr->IsServerClosed()) {
return;
}
// Complete the service request.
KScopedAutoObject server_session{&session_ptr->GetServerSession()};
server_session->CompleteSyncRequest(*context);
});
}
condition.notify_one();
@@ -102,7 +111,7 @@ ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const
ServiceThread::~ServiceThread() = default;
void ServiceThread::QueueSyncRequest(ServerSession& session,
void ServiceThread::QueueSyncRequest(KSession& session,
std::shared_ptr<HLERequestContext>&& context) {
impl->QueueSyncRequest(session, std::move(context));
}

View File

@@ -11,14 +11,14 @@ namespace Kernel {
class HLERequestContext;
class KernelCore;
class ServerSession;
class KSession;
class ServiceThread final {
public:
explicit ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name);
~ServiceThread();
void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context);
void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);
private:
class Impl;

File diff suppressed because it is too large

View File

@@ -6,9 +6,24 @@
#include "common/common_types.h"
namespace Kernel {
using Handle = u32;
}
namespace Kernel::Svc {
constexpr s32 ArgumentHandleCountMax = 0x40;
constexpr u32 HandleWaitMask{1u << 30};
constexpr inline Handle InvalidHandle = Handle(0);
enum PseudoHandle : Handle {
CurrentThread = 0xFFFF8000,
CurrentProcess = 0xFFFF8001,
};
constexpr bool IsPseudoHandle(Handle handle) {
return handle == PseudoHandle::CurrentProcess || handle == PseudoHandle::CurrentThread;
}
} // namespace Kernel::Svc
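Since IsPseudoHandle is constexpr, its behaviour can be pinned down at compile time; a minimal sketch:

static_assert(Kernel::Svc::IsPseudoHandle(Kernel::Svc::PseudoHandle::CurrentThread));
static_assert(Kernel::Svc::IsPseudoHandle(Kernel::Svc::PseudoHandle::CurrentProcess));
static_assert(!Kernel::Svc::IsPseudoHandle(Kernel::Svc::InvalidHandle));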

View File

@@ -10,18 +10,18 @@ namespace Kernel {
// Confirmed Switch kernel error codes
constexpr ResultCode ResultMaxConnectionsReached{ErrorModule::Kernel, 7};
constexpr ResultCode ResultInvalidCapabilityDescriptor{ErrorModule::Kernel, 14};
constexpr ResultCode ResultOutOfSessions{ErrorModule::Kernel, 7};
constexpr ResultCode ResultInvalidArgument{ErrorModule::Kernel, 14};
constexpr ResultCode ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
constexpr ResultCode ResultInvalidSize{ErrorModule::Kernel, 101};
constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
constexpr ResultCode ResultOutOfResource{ErrorModule::Kernel, 103};
constexpr ResultCode ResultOutOfMemory{ErrorModule::Kernel, 104};
constexpr ResultCode ResultHandleTableFull{ErrorModule::Kernel, 105};
constexpr ResultCode ResultOutOfHandles{ErrorModule::Kernel, 105};
constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
constexpr ResultCode ResultInvalidMemoryPermissions{ErrorModule::Kernel, 108};
constexpr ResultCode ResultInvalidMemoryRange{ErrorModule::Kernel, 110};
constexpr ResultCode ResultInvalidNewMemoryPermission{ErrorModule::Kernel, 108};
constexpr ResultCode ResultInvalidMemoryRegion{ErrorModule::Kernel, 110};
constexpr ResultCode ResultInvalidPriority{ErrorModule::Kernel, 112};
constexpr ResultCode ResultInvalidCoreId{ErrorModule::Kernel, 113};
constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
@@ -33,9 +33,11 @@ constexpr ResultCode ResultOutOfRange{ErrorModule::Kernel, 119};
constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
constexpr ResultCode ResultNotFound{ErrorModule::Kernel, 121};
constexpr ResultCode ResultBusy{ErrorModule::Kernel, 122};
constexpr ResultCode ResultSessionClosedByRemote{ErrorModule::Kernel, 123};
constexpr ResultCode ResultSessionClosed{ErrorModule::Kernel, 123};
constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
constexpr ResultCode ResultReservedValue{ErrorModule::Kernel, 126};
constexpr ResultCode ResultResourceLimitedExceeded{ErrorModule::Kernel, 132};
constexpr ResultCode ResultReservedUsed{ErrorModule::Kernel, 126};
constexpr ResultCode ResultPortClosed{ErrorModule::Kernel, 131};
constexpr ResultCode ResultLimitReached{ErrorModule::Kernel, 132};
constexpr ResultCode ResultInvalidId{ErrorModule::Kernel, 519};
} // namespace Kernel

View File

@@ -154,15 +154,28 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
// Used by GetResourceLimitLimitValue.
template <ResultCode func(Core::System&, u64*, Handle, LimitableResource)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<Handle>(Param(system, 1)),
static_cast<LimitableResource>(Param(system, 2)))
.raw;
system.CurrentArmInterface().SetReg(1, param_1);
FuncReturn(system, retval);
}
template <ResultCode func(Core::System&, u32, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1)).raw);
}
template <ResultCode func(Core::System&, u32, u32, u64)>
// Used by SetResourceLimitLimitValue
template <ResultCode func(Core::System&, Handle, LimitableResource, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
static_cast<u32>(Param(system, 1)), Param(system, 2))
FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
static_cast<LimitableResource>(Param(system, 1)), Param(system, 2))
.raw);
}
@@ -219,10 +232,11 @@ void SvcWrap64(Core::System& system) {
func(system, Param(system, 0), Param(system, 1), static_cast<u32>(Param(system, 2))).raw);
}
template <ResultCode func(Core::System&, u32, u64, u64, u32)>
// Used by MapSharedMemory
template <ResultCode func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
Param(system, 2), static_cast<u32>(Param(system, 3)))
FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)), Param(system, 1),
Param(system, 2), static_cast<Svc::MemoryPermission>(Param(system, 3)))
.raw);
}
@@ -252,11 +266,13 @@ void SvcWrap64(Core::System& system) {
.raw);
}
template <ResultCode func(Core::System&, u64*, u64, u64, u64)>
// Used by GetInfo
template <ResultCode func(Core::System&, u64*, u64, Handle, u64)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval =
func(system, &param_1, Param(system, 1), Param(system, 2), Param(system, 3)).raw;
const u32 retval = func(system, &param_1, Param(system, 1),
static_cast<Handle>(Param(system, 2)), Param(system, 3))
.raw;
system.CurrentArmInterface().SetReg(1, param_1);
FuncReturn(system, retval);
@@ -273,11 +289,12 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
template <ResultCode func(Core::System&, u32*, u64, u64, u32)>
// Used by CreateTransferMemory
template <ResultCode func(Core::System&, Handle*, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2),
static_cast<u32>(Param(system, 3)))
static_cast<Svc::MemoryPermission>(Param(system, 3)))
.raw;
system.CurrentArmInterface().SetReg(1, param_1);
@@ -537,6 +554,16 @@ void SvcWrap32(Core::System& system) {
FuncReturn(system, retval);
}
// Used by MapSharedMemory32
template <ResultCode func(Core::System&, Handle, u32, u32, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
static_cast<u32>(Param(system, 1)), static_cast<u32>(Param(system, 2)),
static_cast<Svc::MemoryPermission>(Param(system, 3)))
.raw;
FuncReturn(system, retval);
}
// Used by SetThreadCoreMask32
template <ResultCode func(Core::System&, Handle, s32, u32, u32)>
void SvcWrap32(Core::System& system) {
@@ -586,11 +613,12 @@ void SvcWrap32(Core::System& system) {
}
// Used by CreateTransferMemory32
template <ResultCode func(Core::System&, Handle*, u32, u32, u32)>
template <ResultCode func(Core::System&, Handle*, u32, u32, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
Handle handle = 0;
const u32 retval =
func(system, &handle, Param32(system, 1), Param32(system, 2), Param32(system, 3)).raw;
const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2),
static_cast<Svc::MemoryPermission>(Param32(system, 3)))
.raw;
system.CurrentArmInterface().SetReg(1, handle);
FuncReturn(system, retval);
}
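For context on how a particular overload above is chosen: the SVC implementation is supplied as the non-type template argument, and overload resolution picks the SvcWrap64/SvcWrap32 specialization whose template parameter matches its signature. A sketch matching the GetResourceLimitLimitValue shape noted above; the actual declaration and dispatch table live in svc.cpp and may differ:

// SVC with the GetResourceLimitLimitValue shape (declaration assumed for illustration).
ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_value,
                                      Handle resource_limit, LimitableResource which);

// In the dispatch table, this signature selects the wrapper that unpacks X1/X2 and
// writes the output value back into X1:
//     SvcWrap64<GetResourceLimitLimitValue>(system);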

View File

@@ -6,7 +6,6 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
@@ -15,16 +14,12 @@
namespace Kernel {
TimeManager::TimeManager(Core::System& system_) : system{system_} {
time_manager_event_type = Core::Timing::CreateEvent(
"Kernel::TimeManagerCallback",
[this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
std::shared_ptr<KThread> thread;
{
std::lock_guard lock{mutex};
thread = SharedFrom<KThread>(reinterpret_cast<KThread*>(thread_handle));
}
thread->Wakeup();
});
time_manager_event_type =
Core::Timing::CreateEvent("Kernel::TimeManagerCallback",
[this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
KThread* thread = reinterpret_cast<KThread*>(thread_handle);
thread->Wakeup();
});
}
void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {

View File

@@ -8,8 +8,6 @@
#include <mutex>
#include <unordered_map>
#include "core/hle/kernel/object.h"
namespace Core {
class System;
} // namespace Core