remove old files
@@ -1,317 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <vector>

#include "common/assert.h"
#include "common/common_types.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"

namespace Kernel {

// Wake up num_to_wake (or all) threads in a vector.
void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
                                 s32 num_to_wake) {
    // Only process up to 'num_to_wake' threads, unless 'num_to_wake' is <= 0, in which case
    // process them all.
    std::size_t last = waiting_threads.size();
    if (num_to_wake > 0) {
        last = std::min(last, static_cast<std::size_t>(num_to_wake));
    }

    // Signal the waiting threads.
    for (std::size_t i = 0; i < last; i++) {
        waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
        RemoveThread(waiting_threads[i]);
        waiting_threads[i]->WaitForArbitration(false);
        waiting_threads[i]->ResumeFromWait();
    }
}

AddressArbiter::AddressArbiter(Core::System& system) : system{system} {}
AddressArbiter::~AddressArbiter() = default;

ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value,
                                           s32 num_to_wake) {
    switch (type) {
    case SignalType::Signal:
        return SignalToAddressOnly(address, num_to_wake);
    case SignalType::IncrementAndSignalIfEqual:
        return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
    case SignalType::ModifyByWaitingCountAndSignalIfEqual:
        return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake);
    default:
        return ERR_INVALID_ENUM_VALUE;
    }
}

ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
    KScopedSchedulerLock lock(system.Kernel());
    const std::vector<std::shared_ptr<Thread>> waiting_threads =
        GetThreadsWaitingOnAddress(address);
    WakeThreads(waiting_threads, num_to_wake);
    return RESULT_SUCCESS;
}

ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                              s32 num_to_wake) {
    KScopedSchedulerLock lock(system.Kernel());
    auto& memory = system.Memory();

    // Ensure that we can write to the address.
    if (!memory.IsValidVirtualAddress(address)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    const std::size_t current_core = system.CurrentCoreIndex();
    auto& monitor = system.Monitor();
    u32 current_value;
    do {
        current_value = monitor.ExclusiveRead32(current_core, address);

        if (current_value != static_cast<u32>(value)) {
            return ERR_INVALID_STATE;
        }
        current_value++;
    } while (!monitor.ExclusiveWrite32(current_core, address, current_value));

    return SignalToAddressOnly(address, num_to_wake);
}

ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                                         s32 num_to_wake) {
    KScopedSchedulerLock lock(system.Kernel());
    auto& memory = system.Memory();

    // Ensure that we can write to the address.
    if (!memory.IsValidVirtualAddress(address)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    // Get threads waiting on the address.
    const std::vector<std::shared_ptr<Thread>> waiting_threads =
        GetThreadsWaitingOnAddress(address);

    const std::size_t current_core = system.CurrentCoreIndex();
    auto& monitor = system.Monitor();
    s32 updated_value;
    do {
        updated_value = monitor.ExclusiveRead32(current_core, address);

        if (updated_value != value) {
            return ERR_INVALID_STATE;
        }
        // Determine the modified value depending on the waiting count.
        if (num_to_wake <= 0) {
            if (waiting_threads.empty()) {
                updated_value = value + 1;
            } else {
                updated_value = value - 1;
            }
        } else {
            if (waiting_threads.empty()) {
                updated_value = value + 1;
            } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
                updated_value = value - 1;
            } else {
                updated_value = value;
            }
        }
    } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));

    WakeThreads(waiting_threads, num_to_wake);
    return RESULT_SUCCESS;
}

ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value,
                                          s64 timeout_ns) {
    switch (type) {
    case ArbitrationType::WaitIfLessThan:
        return WaitForAddressIfLessThan(address, value, timeout_ns, false);
    case ArbitrationType::DecrementAndWaitIfLessThan:
        return WaitForAddressIfLessThan(address, value, timeout_ns, true);
    case ArbitrationType::WaitIfEqual:
        return WaitForAddressIfEqual(address, value, timeout_ns);
    default:
        return ERR_INVALID_ENUM_VALUE;
    }
}

ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
                                                    bool should_decrement) {
    auto& memory = system.Memory();
    auto& kernel = system.Kernel();
    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();

    Handle event_handle = InvalidHandle;
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);

        if (current_thread->IsPendingTermination()) {
            lock.CancelSleep();
            return ERR_THREAD_TERMINATING;
        }

        // Ensure that we can read the address.
        if (!memory.IsValidVirtualAddress(address)) {
            lock.CancelSleep();
            return ERR_INVALID_ADDRESS_STATE;
        }

        s32 current_value = static_cast<s32>(memory.Read32(address));
        if (current_value >= value) {
            lock.CancelSleep();
            return ERR_INVALID_STATE;
        }

        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);

        s32 decrement_value;

        const std::size_t current_core = system.CurrentCoreIndex();
        auto& monitor = system.Monitor();
        do {
            current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
            if (should_decrement) {
                decrement_value = current_value - 1;
            } else {
                decrement_value = current_value;
            }
        } while (
            !monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));

        // Short-circuit without rescheduling, if timeout is zero.
        if (timeout == 0) {
            lock.CancelSleep();
            return RESULT_TIMEOUT;
        }

        current_thread->SetArbiterWaitAddress(address);
        InsertThread(SharedFrom(current_thread));
        current_thread->SetStatus(ThreadStatus::WaitArb);
        current_thread->WaitForArbitration(true);
    }

    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }

    {
        KScopedSchedulerLock lock(kernel);
        if (current_thread->IsWaitingForArbitration()) {
            RemoveThread(SharedFrom(current_thread));
            current_thread->WaitForArbitration(false);
        }
    }

    return current_thread->GetSignalingResult();
}

ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
    auto& memory = system.Memory();
    auto& kernel = system.Kernel();
    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();

    Handle event_handle = InvalidHandle;
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);

        if (current_thread->IsPendingTermination()) {
            lock.CancelSleep();
            return ERR_THREAD_TERMINATING;
        }

        // Ensure that we can read the address.
        if (!memory.IsValidVirtualAddress(address)) {
            lock.CancelSleep();
            return ERR_INVALID_ADDRESS_STATE;
        }

        s32 current_value = static_cast<s32>(memory.Read32(address));
        if (current_value != value) {
            lock.CancelSleep();
            return ERR_INVALID_STATE;
        }

        // Short-circuit without rescheduling, if timeout is zero.
        if (timeout == 0) {
            lock.CancelSleep();
            return RESULT_TIMEOUT;
        }

        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
        current_thread->SetArbiterWaitAddress(address);
        InsertThread(SharedFrom(current_thread));
        current_thread->SetStatus(ThreadStatus::WaitArb);
        current_thread->WaitForArbitration(true);
    }

    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }

    {
        KScopedSchedulerLock lock(kernel);
        if (current_thread->IsWaitingForArbitration()) {
            RemoveThread(SharedFrom(current_thread));
            current_thread->WaitForArbitration(false);
        }
    }

    return current_thread->GetSignalingResult();
}

void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
    const VAddr arb_addr = thread->GetArbiterWaitAddress();
    std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];

    const auto iter =
        std::find_if(thread_list.cbegin(), thread_list.cend(), [&thread](const auto& entry) {
            return entry->GetPriority() >= thread->GetPriority();
        });

    if (iter == thread_list.cend()) {
        thread_list.push_back(std::move(thread));
    } else {
        thread_list.insert(iter, std::move(thread));
    }
}

void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
    const VAddr arb_addr = thread->GetArbiterWaitAddress();
    std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];

    const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
                                   [&thread](const auto& entry) { return thread == entry; });

    if (iter != thread_list.cend()) {
        thread_list.erase(iter);
    }
}

std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
    VAddr address) const {
    const auto iter = arb_threads.find(address);
    if (iter == arb_threads.cend()) {
        return {};
    }

    const std::list<std::shared_ptr<Thread>>& thread_list = iter->second;
    return {thread_list.cbegin(), thread_list.cend()};
}
} // namespace Kernel
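
The three signal variants above share one pattern: exclusively read the target word, bail out if it no longer holds the expected value, compute the new value, and retry the write until no other core intervened. A standalone sketch of that retry shape (not from this commit; std::atomic stands in for the per-core ExclusiveRead32/ExclusiveWrite32 monitor):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Increment `word` only if it currently equals `expected_value`; retry on
// contention, mirroring the IncrementAndSignalToAddressIfEqual() loop above.
bool IncrementIfEqual(std::atomic<std::uint32_t>& word, std::uint32_t expected_value) {
    std::uint32_t current = word.load();
    do {
        if (current != expected_value) {
            return false; // Corresponds to returning ERR_INVALID_STATE.
        }
        // On failure, compare_exchange_strong reloads `current`, so the loop
        // re-checks the freshly observed value, just like re-reading the
        // exclusive monitor after a failed exclusive store.
    } while (!word.compare_exchange_strong(current, current + 1));
    return true;
}

int main() {
    std::atomic<std::uint32_t> value{41};
    std::printf("incremented: %d, value: %u\n", IncrementIfEqual(value, 41), value.load());
    std::printf("incremented: %d, value: %u\n", IncrementIfEqual(value, 41), value.load());
}
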
@@ -1,91 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <list>
#include <memory>
#include <unordered_map>
#include <vector>

#include "common/common_types.h"

union ResultCode;

namespace Core {
class System;
}

namespace Kernel {

class Thread;

class AddressArbiter {
public:
    enum class ArbitrationType {
        WaitIfLessThan = 0,
        DecrementAndWaitIfLessThan = 1,
        WaitIfEqual = 2,
    };

    enum class SignalType {
        Signal = 0,
        IncrementAndSignalIfEqual = 1,
        ModifyByWaitingCountAndSignalIfEqual = 2,
    };

    explicit AddressArbiter(Core::System& system);
    ~AddressArbiter();

    AddressArbiter(const AddressArbiter&) = delete;
    AddressArbiter& operator=(const AddressArbiter&) = delete;

    AddressArbiter(AddressArbiter&&) = default;
    AddressArbiter& operator=(AddressArbiter&&) = delete;

    /// Signals an address being waited on with a particular signaling type.
    ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake);

    /// Waits on an address with a particular arbitration type.
    ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);

private:
    /// Signals an address being waited on.
    ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);

    /// Signals an address being waited on and increments its value if equal to the value argument.
    ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);

    /// Signals an address being waited on and modifies its value based on waiting thread count if
    /// equal to the value argument.
    ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                             s32 num_to_wake);

    /// Waits on an address if the value passed is less than the argument value,
    /// optionally decrementing.
    ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
                                        bool should_decrement);

    /// Waits on an address if the value passed is equal to the argument value.
    ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);

    /// Wakes up num_to_wake (or all) threads in a vector.
    void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);

    /// Inserts a thread into the address arbiter container.
    void InsertThread(std::shared_ptr<Thread> thread);

    /// Removes a thread from the address arbiter container.
    void RemoveThread(std::shared_ptr<Thread> thread);

    /// Gets the threads waiting on an address.
    std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;

    /// Lists of threads waiting on an address arbiter, keyed by wait address.
    std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads;

    Core::System& system;
};

} // namespace Kernel
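
The header only exposes the two SVC-facing entry points; everything else is an implementation detail. A loose, self-contained analogy of the WaitIfLessThan/Signal contract (not from this commit; timeouts and per-thread wake counts are omitted), built on standard primitives instead of the kernel scheduler:

#include <condition_variable>
#include <cstdint>
#include <cstdio>
#include <mutex>
#include <thread>

std::mutex m;
std::condition_variable cv;
std::int32_t word = 0;

// WaitIfLessThan: sleep while the word is still below `value`.
void WaitWhileLessThan(std::int32_t value) {
    std::unique_lock lock{m};
    cv.wait(lock, [&] { return word >= value; });
}

// Signal: store the new value and wake the sleepers (num_to_wake <= 0 in the
// real arbiter wakes every waiter, which notify_all mimics here).
void SignalValue(std::int32_t new_value) {
    {
        std::lock_guard lock{m};
        word = new_value;
    }
    cv.notify_all();
}

int main() {
    std::thread waiter([] {
        WaitWhileLessThan(1);
        std::puts("woken");
    });
    SignalValue(1);
    waiter.join();
}
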
@@ -1,47 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel {

ClientPort::ClientPort(KernelCore& kernel) : Object{kernel} {}
ClientPort::~ClientPort() = default;

std::shared_ptr<ServerPort> ClientPort::GetServerPort() const {
    return server_port;
}

ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
    if (active_sessions >= max_sessions) {
        return ResultMaxConnectionsReached;
    }
    active_sessions++;

    auto [client, server] = Kernel::Session::Create(kernel, name);

    if (server_port->HasHLEHandler()) {
        server_port->GetHLEHandler()->ClientConnected(std::move(server));
    } else {
        server_port->AppendPendingSession(std::move(server));
    }

    return MakeResult(std::move(client));
}

void ClientPort::ConnectionClosed() {
    if (active_sessions == 0) {
        return;
    }

    --active_sessions;
}

} // namespace Kernel
@@ -1,63 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <string>

#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"

namespace Kernel {

class ClientSession;
class KernelCore;
class ServerPort;

class ClientPort final : public Object {
public:
    explicit ClientPort(KernelCore& kernel);
    ~ClientPort() override;

    friend class ServerPort;
    std::string GetTypeName() const override {
        return "ClientPort";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ClientPort;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    std::shared_ptr<ServerPort> GetServerPort() const;

    /**
     * Creates a new Session pair, adds the created ServerSession to the associated ServerPort's
     * list of pending sessions, and signals the ServerPort, causing any threads
     * waiting on it to awake.
     * @returns ClientSession The client endpoint of the created Session pair, or an error code.
     */
    ResultVal<std::shared_ptr<ClientSession>> Connect();

    /**
     * Signifies that a previously active connection has been closed,
     * decreasing the total number of active connections to this port.
     */
    void ConnectionClosed();

    void Finalize() override {}

private:
    std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port.
    u32 max_sessions = 0;    ///< Maximum number of simultaneous sessions the port can have
    u32 active_sessions = 0; ///< Number of currently open sessions to this port
    std::string name;        ///< Name of client port (optional)
};

} // namespace Kernel
@@ -1,53 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/result.h"

namespace Kernel {

ClientSession::ClientSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}

ClientSession::~ClientSession() {
    // This destructor will be called automatically when the last ClientSession handle is closed by
    // the emulated application.
    if (parent->Server()) {
        parent->Server()->ClientDisconnected();
    }
}

bool ClientSession::IsSignaled() const {
    UNIMPLEMENTED();
    return true;
}

ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kernel,
                                                                std::shared_ptr<Session> parent,
                                                                std::string name) {
    std::shared_ptr<ClientSession> client_session{std::make_shared<ClientSession>(kernel)};

    client_session->name = std::move(name);
    client_session->parent = std::move(parent);

    return MakeResult(std::move(client_session));
}

ResultCode ClientSession::SendSyncRequest(std::shared_ptr<KThread> thread,
                                          Core::Memory::Memory& memory,
                                          Core::Timing::CoreTiming& core_timing) {
    // Ensure that the session's server endpoint is still alive before forwarding the request.
    if (!parent->Server()) {
        return ResultSessionClosedByRemote;
    }

    // Signal the server session that new data is available.
    return parent->Server()->HandleSyncRequest(std::move(thread), memory, core_timing);
}

} // namespace Kernel
@@ -1,68 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <string>

#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"

namespace Core::Memory {
class Memory;
}

namespace Core::Timing {
class CoreTiming;
}

namespace Kernel {

class KernelCore;
class Session;
class KThread;

class ClientSession final : public KSynchronizationObject {
public:
    explicit ClientSession(KernelCore& kernel);
    ~ClientSession() override;

    friend class Session;

    std::string GetTypeName() const override {
        return "ClientSession";
    }

    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ClientSession;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    ResultCode SendSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
                               Core::Timing::CoreTiming& core_timing);

    bool IsSignaled() const override;

    void Finalize() override {}

private:
    static ResultVal<std::shared_ptr<ClientSession>> Create(KernelCore& kernel,
                                                            std::shared_ptr<Session> parent,
                                                            std::string name = "Unknown");

    /// The parent session, which links to the server endpoint.
    std::shared_ptr<Session> parent;

    /// Name of the client session (optional)
    std::string name;
};

} // namespace Kernel
@@ -1,43 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/result.h"

namespace Kernel {

// Confirmed Switch kernel error codes

constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59};
constexpr ResultCode ERR_TERMINATION_REQUESTED{ErrorModule::Kernel, 59};
constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104};
constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105};
constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106};
constexpr ResultCode ERR_INVALID_CURRENT_MEMORY{ErrorModule::Kernel, 106};
constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108};
constexpr ResultCode ERR_INVALID_MEMORY_RANGE{ErrorModule::Kernel, 110};
constexpr ResultCode ERR_INVALID_THREAD_PRIORITY{ErrorModule::Kernel, 112};
constexpr ResultCode ERR_INVALID_PROCESSOR_ID{ErrorModule::Kernel, 113};
constexpr ResultCode ERR_INVALID_HANDLE{ErrorModule::Kernel, 114};
constexpr ResultCode ERR_INVALID_POINTER{ErrorModule::Kernel, 115};
constexpr ResultCode ERR_INVALID_COMBINATION{ErrorModule::Kernel, 116};
constexpr ResultCode RESULT_TIMEOUT{ErrorModule::Kernel, 117};
constexpr ResultCode ERR_SYNCHRONIZATION_CANCELED{ErrorModule::Kernel, 118};
constexpr ResultCode ERR_CANCELLED{ErrorModule::Kernel, 118};
constexpr ResultCode ERR_OUT_OF_RANGE{ErrorModule::Kernel, 119};
constexpr ResultCode ERR_INVALID_ENUM_VALUE{ErrorModule::Kernel, 120};
constexpr ResultCode ERR_NOT_FOUND{ErrorModule::Kernel, 121};
constexpr ResultCode ERR_BUSY{ErrorModule::Kernel, 122};
constexpr ResultCode ERR_SESSION_CLOSED_BY_REMOTE{ErrorModule::Kernel, 123};
constexpr ResultCode ERR_INVALID_STATE{ErrorModule::Kernel, 125};
constexpr ResultCode ERR_RESERVED_VALUE{ErrorModule::Kernel, 126};
constexpr ResultCode ERR_RESOURCE_LIMIT_EXCEEDED{ErrorModule::Kernel, 132};

} // namespace Kernel
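
Note that descriptions 59, 106, and 118 are each bound to two names; the duplicates are deliberate aliases from different naming eras of the codebase. A self-contained sketch (not from this commit) of why aliased constants are interchangeable in comparisons, using a simplified stand-in for the real ResultCode defined in core/hle/result.h:

#include <cstdint>
#include <cstdio>

// Simplified stand-in for ResultCode: module in the low bits, description above.
struct MiniResult {
    std::uint32_t raw;
    constexpr MiniResult(std::uint32_t module, std::uint32_t description)
        : raw{module | (description << 9)} {}
    constexpr bool operator==(const MiniResult& other) const {
        return raw == other.raw;
    }
};

constexpr std::uint32_t KernelModule = 1;
constexpr MiniResult ERR_THREAD_TERMINATING{KernelModule, 59};
constexpr MiniResult ERR_TERMINATION_REQUESTED{KernelModule, 59};

int main() {
    // Aliased constants compare equal, so either name works in error checks.
    std::printf("%d\n", ERR_THREAD_TERMINATING == ERR_TERMINATION_REQUESTED); // prints 1
}
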
@@ -1,131 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <utility>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel {
namespace {
constexpr u16 GetSlot(Handle handle) {
    return static_cast<u16>(handle >> 15);
}

constexpr u16 GetGeneration(Handle handle) {
    return static_cast<u16>(handle & 0x7FFF);
}
} // Anonymous namespace

HandleTable::HandleTable(KernelCore& kernel) : kernel{kernel} {
    Clear();
}

HandleTable::~HandleTable() = default;

ResultCode HandleTable::SetSize(s32 handle_table_size) {
    if (static_cast<u32>(handle_table_size) > MAX_COUNT) {
        LOG_ERROR(Kernel, "Handle table size {} is greater than {}", handle_table_size, MAX_COUNT);
        return ResultOutOfMemory;
    }

    // Values less than or equal to zero indicate to use the maximum allowable
    // size for the handle table in the actual kernel, so we ignore the given
    // value in that case, since we assume this by default unless this function
    // is called.
    if (handle_table_size > 0) {
        table_size = static_cast<u16>(handle_table_size);
    }

    return RESULT_SUCCESS;
}

ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) {
    DEBUG_ASSERT(obj != nullptr);

    const u16 slot = next_free_slot;
    if (slot >= table_size) {
        LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use.");
        return ResultHandleTableFull;
    }
    next_free_slot = generations[slot];

    const u16 generation = next_generation++;

    // Wrap the counter so it fits in the 15 bits dedicated to the generation in the handle.
    // Horizon OS uses zero to represent an invalid handle, so skip to 1.
    if (next_generation >= (1 << 15)) {
        next_generation = 1;
    }

    generations[slot] = generation;
    objects[slot] = std::move(obj);

    Handle handle = generation | (slot << 15);
    return MakeResult<Handle>(handle);
}

ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
    std::shared_ptr<Object> object = GetGeneric(handle);
    if (object == nullptr) {
        LOG_ERROR(Kernel, "Tried to duplicate invalid handle: {:08X}", handle);
        return ResultInvalidHandle;
    }
    return Create(std::move(object));
}

ResultCode HandleTable::Close(Handle handle) {
    if (!IsValid(handle)) {
        LOG_ERROR(Kernel, "Handle is not valid! handle={:08X}", handle);
        return ResultInvalidHandle;
    }

    const u16 slot = GetSlot(handle);

    if (objects[slot].use_count() == 1) {
        objects[slot]->Finalize();
    }

    objects[slot] = nullptr;

    generations[slot] = next_free_slot;
    next_free_slot = slot;
    return RESULT_SUCCESS;
}

bool HandleTable::IsValid(Handle handle) const {
    const std::size_t slot = GetSlot(handle);
    const u16 generation = GetGeneration(handle);

    return slot < table_size && objects[slot] != nullptr && generations[slot] == generation;
}

std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
    if (handle == CurrentThread) {
        return SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
    } else if (handle == CurrentProcess) {
        return SharedFrom(kernel.CurrentProcess());
    }

    if (!IsValid(handle)) {
        return nullptr;
    }
    return objects[GetSlot(handle)];
}

void HandleTable::Clear() {
    for (u16 i = 0; i < table_size; ++i) {
        generations[i] = static_cast<u16>(i + 1);
        objects[i] = nullptr;
    }
    next_free_slot = 0;
}

} // namespace Kernel
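
A standalone sketch (not from this commit) of the handle encoding implemented above: slot in bits 31:15, generation in bits 14:0, with generation zero reserved for InvalidHandle. It shows why a stale handle to a reused slot fails validation:

#include <cstdint>
#include <cstdio>

using Handle = std::uint32_t;

constexpr Handle MakeHandle(std::uint16_t slot, std::uint16_t generation) {
    return static_cast<Handle>(generation) | (static_cast<Handle>(slot) << 15);
}

constexpr std::uint16_t SlotOf(Handle handle) {
    return static_cast<std::uint16_t>(handle >> 15);
}

constexpr std::uint16_t GenerationOf(Handle handle) {
    return static_cast<std::uint16_t>(handle & 0x7FFF);
}

int main() {
    const Handle h = MakeHandle(3, 42);
    std::printf("slot=%u generation=%u\n", static_cast<unsigned>(SlotOf(h)),
                static_cast<unsigned>(GenerationOf(h))); // slot=3 generation=42
    // A handle minted before the slot was recycled carries an older generation,
    // so it fails the comparison HandleTable::IsValid() performs against the
    // generation currently stored for the slot. That is how use-after-close
    // of a handle is caught.
    const Handle stale = MakeHandle(3, 41);
    std::printf("stale matches: %d\n", GenerationOf(stale) == GenerationOf(h)); // 0
}
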
@@ -1,144 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <cstddef>
#include <memory>

#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"

namespace Kernel {

class KernelCore;

enum KernelHandle : Handle {
    InvalidHandle = 0,
    CurrentThread = 0xFFFF8000,
    CurrentProcess = 0xFFFF8001,
};

/**
 * This class allows the creation of Handles, which are references to objects that can be tested
 * for validity and looked up. Here they are used to pass references to kernel objects to/from the
 * emulated process. It has been designed so that it follows the same handle format and has
 * approximately the same restrictions as the handle manager in the CTR-OS.
 *
 * Handles contain two sub-fields: a slot index (bits 31:15) and a generation value (bits 14:0).
 * The slot index is used to index into the arrays in this class to access the data corresponding
 * to the Handle.
 *
 * To prevent accidental use of a freed Handle whose slot has already been reused, a global counter
 * is kept and incremented every time a Handle is created. This is the Handle's "generation". The
 * value of the counter is stored into the Handle as well as in the handle table (in the
 * "generations" array). When looking up a handle, the Handle's generation must match with the
 * value stored on the class, otherwise the Handle is considered invalid.
 *
 * To find free slots when allocating a Handle without needing to scan the entire object array, the
 * generations field of unallocated slots is re-purposed as a linked list of indices to free slots.
 * When a Handle is created, an index is popped off the list and used for the new Handle. When it
 * is destroyed, it is again pushed onto the list to be re-used by the next allocation. It is
 * likely that this allocation strategy differs from the one used in CTR-OS, but this hasn't been
 * verified and isn't likely to cause any problems.
 */
class HandleTable final : NonCopyable {
public:
    /// This is the maximum limit of handles allowed per process in Horizon
    static constexpr std::size_t MAX_COUNT = 1024;

    explicit HandleTable(KernelCore& kernel);
    ~HandleTable();

    /**
     * Sets the number of handles that may be in use at one time
     * for this handle table.
     *
     * @param handle_table_size The desired size to limit the handle table to.
     *
     * @returns an error code indicating if initialization was successful.
     *          If initialization was not successful, then ResultOutOfMemory
     *          will be returned.
     *
     * @pre handle_table_size must be within the range [0, 1024]
     */
    ResultCode SetSize(s32 handle_table_size);

    /**
     * Allocates a handle for the given object.
     * @return The created Handle or one of the following errors:
     *         - `ResultHandleTableFull`: the maximum number of handles has been exceeded.
     */
    ResultVal<Handle> Create(std::shared_ptr<Object> obj);

    /**
     * Returns a new handle that points to the same object as the passed in handle.
     * @return The duplicated Handle or one of the following errors:
     *         - `ResultInvalidHandle`: an invalid handle was passed in.
     *         - Any errors returned by `Create()`.
     */
    ResultVal<Handle> Duplicate(Handle handle);

    /**
     * Closes a handle, removing it from the table and decreasing the object's ref-count.
     * @return `RESULT_SUCCESS` or one of the following errors:
     *         - `ResultInvalidHandle`: an invalid handle was passed in.
     */
    ResultCode Close(Handle handle);

    /// Checks if a handle is valid and points to an existing object.
    bool IsValid(Handle handle) const;

    /**
     * Looks up a handle.
     * @return Pointer to the looked-up object, or `nullptr` if the handle is not valid.
     */
    std::shared_ptr<Object> GetGeneric(Handle handle) const;

    /**
     * Looks up a handle while verifying its type.
     * @return Pointer to the looked-up object, or `nullptr` if the handle is not valid or its
     *         type differs from the requested one.
     */
    template <class T>
    std::shared_ptr<T> Get(Handle handle) const {
        return DynamicObjectCast<T>(GetGeneric(handle));
    }

    /// Closes all handles held in this table.
    void Clear();

private:
    /// Stores the Object referenced by the handle or null if the slot is empty.
    std::array<std::shared_ptr<Object>, MAX_COUNT> objects;

    /**
     * The value of `next_generation` when the handle was created, used to check for validity. For
     * empty slots, contains the index of the next free slot in the list.
     */
    std::array<u16, MAX_COUNT> generations;

    /**
     * The limited size of the handle table. This can be specified by process
     * capabilities in order to restrict the overall number of handles that
     * can be created in a process instance.
     */
    u16 table_size = static_cast<u16>(MAX_COUNT);

    /**
     * Global counter of the number of created handles. Stored in `generations` when a handle is
     * created, and wraps around to 1 when it hits 0x8000.
     */
    u16 next_generation = 1;

    /// Head of the free slots linked list.
    u16 next_free_slot = 0;

    /// Underlying kernel instance that this handle table operates under.
    KernelCore& kernel;
};

} // namespace Kernel
@@ -1,42 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <limits>
#include <random>

#include "core/hle/kernel/k_system_control.h"

namespace Kernel {

namespace {
template <typename F>
u64 GenerateUniformRange(u64 min, u64 max, F f) {
    // Handle the case where the difference is too large to represent.
    if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
        return f();
    }

    // Iterate until we get a value in range.
    const u64 range_size = ((max + 1) - min);
    const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
    while (true) {
        if (const u64 rnd = f(); rnd < effective_max) {
            return min + (rnd % range_size);
        }
    }
}

} // Anonymous namespace

u64 KSystemControl::GenerateRandomU64() {
    static std::random_device device;
    static std::mt19937 gen(device());
    static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
    return distribution(gen);
}

u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
    return GenerateUniformRange(min, max, GenerateRandomU64);
}

} // namespace Kernel
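
GenerateUniformRange() above rejects raw draws at or above effective_max so that the final modulo cannot favor the low end of the range. A small-number demonstration of that bound (not from this commit), with an 8-value generator standing in for the full 64-bit space:

#include <array>
#include <cstdint>
#include <cstdio>

int main() {
    // With a generator producing 0..7 and a range of size 3 (say min = 10,
    // max = 12), effective_max = (8 / 3) * 3 = 6, so draws 6 and 7 are
    // rejected and every surviving draw maps onto the range the same number
    // of times.
    constexpr std::uint64_t range_size = 3;
    constexpr std::uint64_t generator_values = 8; // stand-in for 2^64
    constexpr std::uint64_t effective_max = (generator_values / range_size) * range_size;

    std::array<int, range_size> hits{};
    for (std::uint64_t raw = 0; raw < generator_values; ++raw) {
        if (raw < effective_max) {
            ++hits[raw % range_size]; // accepted draw
        }
    }
    // Without the rejection, buckets 0 and 1 would get 3 hits and bucket 2
    // only 2, i.e. the classic modulo bias.
    std::printf("%d %d %d\n", hits[0], hits[1], hits[2]); // prints: 2 2 2
}
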
@@ -1,119 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#include <array>
#include <limits>

#include "common/assert.h"
#include "core/hle/kernel/memory/address_space_info.h"

namespace Kernel::Memory {

namespace {

enum : u64 {
    Size_1_MB = 0x100000,
    Size_2_MB = 2 * Size_1_MB,
    Size_128_MB = 128 * Size_1_MB,
    Size_1_GB = 0x40000000,
    Size_2_GB = 2 * Size_1_GB,
    Size_4_GB = 4 * Size_1_GB,
    Size_6_GB = 6 * Size_1_GB,
    Size_64_GB = 64 * Size_1_GB,
    Size_512_GB = 512 * Size_1_GB,
    Invalid = std::numeric_limits<u64>::max(),
};

// clang-format off
constexpr std::array<AddressSpaceInfo, 13> AddressSpaceInfos{{
   { .bit_width = 32, .address = Size_2_MB  , .size = Size_1_GB   - Size_2_MB  , .type = AddressSpaceInfo::Type::Is32Bit,    },
   { .bit_width = 32, .address = Size_1_GB  , .size = Size_4_GB   - Size_1_GB  , .type = AddressSpaceInfo::Type::Small64Bit, },
   { .bit_width = 32, .address = Invalid    , .size = Size_1_GB                , .type = AddressSpaceInfo::Type::Heap,       },
   { .bit_width = 32, .address = Invalid    , .size = Size_1_GB                , .type = AddressSpaceInfo::Type::Alias,      },
   { .bit_width = 36, .address = Size_128_MB, .size = Size_2_GB   - Size_128_MB, .type = AddressSpaceInfo::Type::Is32Bit,    },
   { .bit_width = 36, .address = Size_2_GB  , .size = Size_64_GB  - Size_2_GB  , .type = AddressSpaceInfo::Type::Small64Bit, },
   { .bit_width = 36, .address = Invalid    , .size = Size_6_GB                , .type = AddressSpaceInfo::Type::Heap,       },
   { .bit_width = 36, .address = Invalid    , .size = Size_6_GB                , .type = AddressSpaceInfo::Type::Alias,      },
   { .bit_width = 39, .address = Size_128_MB, .size = Size_512_GB - Size_128_MB, .type = AddressSpaceInfo::Type::Large64Bit, },
   { .bit_width = 39, .address = Invalid    , .size = Size_64_GB               , .type = AddressSpaceInfo::Type::Is32Bit,    },
   { .bit_width = 39, .address = Invalid    , .size = Size_6_GB                , .type = AddressSpaceInfo::Type::Heap,       },
   { .bit_width = 39, .address = Invalid    , .size = Size_64_GB               , .type = AddressSpaceInfo::Type::Alias,      },
   { .bit_width = 39, .address = Invalid    , .size = Size_2_GB                , .type = AddressSpaceInfo::Type::Stack,      },
}};
// clang-format on

constexpr bool IsAllowedIndexForAddress(std::size_t index) {
    return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Invalid;
}

using IndexArray = std::array<std::size_t, static_cast<std::size_t>(AddressSpaceInfo::Type::Count)>;

constexpr IndexArray AddressSpaceIndices32Bit{
    0, 1, 0, 2, 0, 3,
};

constexpr IndexArray AddressSpaceIndices36Bit{
    4, 5, 4, 6, 4, 7,
};

constexpr IndexArray AddressSpaceIndices39Bit{
    9, 8, 8, 10, 12, 11,
};

constexpr bool IsAllowed32BitType(AddressSpaceInfo::Type type) {
    return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
           type != AddressSpaceInfo::Type::Stack;
}

constexpr bool IsAllowed36BitType(AddressSpaceInfo::Type type) {
    return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
           type != AddressSpaceInfo::Type::Stack;
}

constexpr bool IsAllowed39BitType(AddressSpaceInfo::Type type) {
    return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Small64Bit;
}

} // namespace

u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
    const std::size_t index{static_cast<std::size_t>(type)};
    switch (width) {
    case 32:
        ASSERT(IsAllowed32BitType(type));
        ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices32Bit[index]));
        return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].address;
    case 36:
        ASSERT(IsAllowed36BitType(type));
        ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices36Bit[index]));
        return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].address;
    case 39:
        ASSERT(IsAllowed39BitType(type));
        ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
        return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
    }
    UNREACHABLE();
    return 0;
}

std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
    const std::size_t index{static_cast<std::size_t>(type)};
    switch (width) {
    case 32:
        ASSERT(IsAllowed32BitType(type));
        return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].size;
    case 36:
        ASSERT(IsAllowed36BitType(type));
        return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].size;
    case 39:
        ASSERT(IsAllowed39BitType(type));
        return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
    }
    UNREACHABLE();
    return 0;
}

} // namespace Kernel::Memory
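
Each per-width IndexArray above maps a Type to a row of AddressSpaceInfos; rows whose address is Invalid (Heap, Alias, Stack) have no fixed base and are placed dynamically at runtime. A standalone sketch (not from this commit) of that two-level lookup, renumbered down to just the four rows reachable in a 36-bit space:

#include <array>
#include <cstddef>
#include <cstdio>

enum class Type : std::size_t { Is32Bit, Small64Bit, Large64Bit, Heap, Stack, Alias, Count };

struct Row {
    int bit_width;
    unsigned long long address; // ~0ull plays the role of Invalid here
};

// Rows 4..7 of the real AddressSpaceInfos table, renumbered 0..3.
constexpr std::array<Row, 4> rows{{
    {36, 128ull << 20}, // Is32Bit window starts at 128 MiB
    {36, 2ull << 30},   // Small64Bit region starts at 2 GiB
    {36, ~0ull},        // Heap: no fixed base
    {36, ~0ull},        // Alias: no fixed base
}};

// AddressSpaceIndices36Bit{4, 5, 4, 6, 4, 7}, renumbered to match `rows`;
// the entries for the disallowed Large64Bit/Stack types are unused placeholders.
constexpr std::array<std::size_t, static_cast<std::size_t>(Type::Count)> indices36{0, 1, 0,
                                                                                   2, 0, 3};

int main() {
    const Row row = rows[indices36[static_cast<std::size_t>(Type::Heap)]];
    std::printf("heap: width=%d address=0x%llx\n", row.bit_width, row.address);
}
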
@@ -1,34 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include <cstddef>

#include "common/common_types.h"

namespace Kernel::Memory {

struct AddressSpaceInfo final {
    enum class Type : u32 {
        Is32Bit = 0,
        Small64Bit = 1,
        Large64Bit = 2,
        Heap = 3,
        Stack = 4,
        Alias = 5,
        Count,
    };

    static u64 GetAddressSpaceStart(std::size_t width, Type type);
    static std::size_t GetAddressSpaceSize(std::size_t width, Type type);

    const std::size_t bit_width{};
    const std::size_t address{};
    const std::size_t size{};
    const Type type{};
};

} // namespace Kernel::Memory
@@ -1,335 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include <type_traits>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/kernel/svc_types.h"

namespace Kernel::Memory {

enum class MemoryState : u32 {
    None = 0,
    Mask = 0xFF,
    All = ~None,

    FlagCanReprotect = (1 << 8),
    FlagCanDebug = (1 << 9),
    FlagCanUseIpc = (1 << 10),
    FlagCanUseNonDeviceIpc = (1 << 11),
    FlagCanUseNonSecureIpc = (1 << 12),
    FlagMapped = (1 << 13),
    FlagCode = (1 << 14),
    FlagCanAlias = (1 << 15),
    FlagCanCodeAlias = (1 << 16),
    FlagCanTransfer = (1 << 17),
    FlagCanQueryPhysical = (1 << 18),
    FlagCanDeviceMap = (1 << 19),
    FlagCanAlignedDeviceMap = (1 << 20),
    FlagCanIpcUserBuffer = (1 << 21),
    FlagReferenceCounted = (1 << 22),
    FlagCanMapProcess = (1 << 23),
    FlagCanChangeAttribute = (1 << 24),
    FlagCanCodeMemory = (1 << 25),

    FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
                FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
                FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer |
                FlagReferenceCounted | FlagCanChangeAttribute,

    FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
                FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap |
                FlagCanAlignedDeviceMap | FlagReferenceCounted,

    FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap,

    Free = static_cast<u32>(Svc::MemoryState::Free),
    Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped,
    Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
    Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
    CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
               FlagCanCodeMemory,
    Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted,
    Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,

    AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
                FlagCanCodeAlias,
    AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData |
                    FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory,

    Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap |
          FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
            FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    ThreadLocal =
        static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,

    Transferred = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc |
                  FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
                  FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    SharedTransferred = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc |
                        FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
                 FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible),

    NonSecureIpc = static_cast<u32>(Svc::MemoryState::NonSecureIpc) | FlagsMisc |
                   FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    NonDeviceIpc =
        static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc,

    Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,

    GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
                    FlagReferenceCounted | FlagCanDebug,
    CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted,
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryState);

static_assert(static_cast<u32>(MemoryState::Free) == 0x00000000);
static_assert(static_cast<u32>(MemoryState::Io) == 0x00002001);
static_assert(static_cast<u32>(MemoryState::Static) == 0x00042002);
static_assert(static_cast<u32>(MemoryState::Code) == 0x00DC7E03);
static_assert(static_cast<u32>(MemoryState::CodeData) == 0x03FEBD04);
static_assert(static_cast<u32>(MemoryState::Normal) == 0x037EBD05);
static_assert(static_cast<u32>(MemoryState::Shared) == 0x00402006);
static_assert(static_cast<u32>(MemoryState::AliasCode) == 0x00DD7E08);
static_assert(static_cast<u32>(MemoryState::AliasCodeData) == 0x03FFBD09);
static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A);
static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B);
static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C);
static_assert(static_cast<u32>(MemoryState::Transferred) == 0x015C3C0D);
static_assert(static_cast<u32>(MemoryState::SharedTransferred) == 0x005C380E);
static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F);
static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811);
static_assert(static_cast<u32>(MemoryState::NonDeviceIpc) == 0x004C2812);
static_assert(static_cast<u32>(MemoryState::Kernel) == 0x00002013);
static_assert(static_cast<u32>(MemoryState::GeneratedCode) == 0x00402214);
static_assert(static_cast<u32>(MemoryState::CodeOut) == 0x00402015);

enum class MemoryPermission : u8 {
    None = 0,
    Mask = static_cast<u8>(~None),

    Read = 1 << 0,
    Write = 1 << 1,
    Execute = 1 << 2,

    ReadAndWrite = Read | Write,
    ReadAndExecute = Read | Execute,

    UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
                               Svc::MemoryPermission::Execute),
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission);

enum class MemoryAttribute : u8 {
    None = 0x00,
    Mask = 0x7F,
    All = Mask,
    DontCareMask = 0x80,

    Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
    IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
    DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared),
    Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),

    IpcAndDeviceMapped = IpcLocked | DeviceShared,
    LockedAndIpcLocked = Locked | IpcLocked,
    DeviceSharedAndUncached = DeviceShared | Uncached
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);

static_assert((static_cast<u8>(MemoryAttribute::Mask) &
               static_cast<u8>(MemoryAttribute::DontCareMask)) == 0);

struct MemoryInfo {
    VAddr addr{};
    std::size_t size{};
    MemoryState state{};
    MemoryPermission perm{};
    MemoryAttribute attribute{};
    MemoryPermission original_perm{};
    u16 ipc_lock_count{};
    u16 device_use_count{};

    constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
        return {
            addr,
            size,
            static_cast<Svc::MemoryState>(state & MemoryState::Mask),
            static_cast<Svc::MemoryAttribute>(attribute & MemoryAttribute::Mask),
            static_cast<Svc::MemoryPermission>(perm & MemoryPermission::UserMask),
            ipc_lock_count,
            device_use_count,
        };
    }

    constexpr VAddr GetAddress() const {
        return addr;
    }
    constexpr std::size_t GetSize() const {
        return size;
    }
    constexpr std::size_t GetNumPages() const {
        return GetSize() / PageSize;
    }
    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();
    }
    constexpr VAddr GetLastAddress() const {
        return GetEndAddress() - 1;
    }
};

class MemoryBlock final {
    friend class MemoryBlockManager;

private:
    VAddr addr{};
    std::size_t num_pages{};
    MemoryState state{MemoryState::None};
    u16 ipc_lock_count{};
    u16 device_use_count{};
    MemoryPermission perm{MemoryPermission::None};
    MemoryPermission original_perm{MemoryPermission::None};
    MemoryAttribute attribute{MemoryAttribute::None};

public:
    static constexpr int Compare(const MemoryBlock& lhs, const MemoryBlock& rhs) {
        if (lhs.GetAddress() < rhs.GetAddress()) {
            return -1;
        } else if (lhs.GetAddress() <= rhs.GetLastAddress()) {
            return 0;
        } else {
            return 1;
        }
    }

public:
    constexpr MemoryBlock() = default;
    constexpr MemoryBlock(VAddr addr_, std::size_t num_pages_, MemoryState state_,
                          MemoryPermission perm_, MemoryAttribute attribute_)
        : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}

    constexpr VAddr GetAddress() const {
        return addr;
    }

    constexpr std::size_t GetNumPages() const {
        return num_pages;
    }

    constexpr std::size_t GetSize() const {
        return GetNumPages() * PageSize;
    }

    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();
    }

    constexpr VAddr GetLastAddress() const {
        return GetEndAddress() - 1;
    }

    constexpr MemoryInfo GetMemoryInfo() const {
        return {
            GetAddress(), GetSize(), state, perm,
            attribute, original_perm, ipc_lock_count, device_use_count,
        };
    }

    void ShareToDevice(MemoryPermission /*new_perm*/) {
        ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared ||
               device_use_count == 0);
        attribute |= MemoryAttribute::DeviceShared;
        const u16 new_use_count{++device_use_count};
        ASSERT(new_use_count > 0);
    }

    void UnshareToDevice(MemoryPermission /*new_perm*/) {
        ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared);
        const u16 prev_use_count{device_use_count--};
        ASSERT(prev_use_count > 0);
        if (prev_use_count == 1) {
            attribute &= ~MemoryAttribute::DeviceShared;
        }
    }

private:
    constexpr bool HasProperties(MemoryState s, MemoryPermission p, MemoryAttribute a) const {
        constexpr MemoryAttribute AttributeIgnoreMask{MemoryAttribute::DontCareMask |
                                                      MemoryAttribute::IpcLocked |
                                                      MemoryAttribute::DeviceShared};
        return state == s && perm == p &&
               (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
    }

    constexpr bool HasSameProperties(const MemoryBlock& rhs) const {
        return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
               attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
               device_use_count == rhs.device_use_count;
    }

    constexpr bool Contains(VAddr start) const {
        return GetAddress() <= start && start <= GetEndAddress();
    }

    constexpr void Add(std::size_t count) {
        ASSERT(count > 0);
        ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1);

        num_pages += count;
    }

    constexpr void Update(MemoryState new_state, MemoryPermission new_perm,
                          MemoryAttribute new_attribute) {
        ASSERT(original_perm == MemoryPermission::None);
        ASSERT((attribute & MemoryAttribute::IpcLocked) == MemoryAttribute::None);

        state = new_state;
        perm = new_perm;

        attribute = static_cast<MemoryAttribute>(
            new_attribute |
            (attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared)));
    }

    constexpr MemoryBlock Split(VAddr split_addr) {
        ASSERT(GetAddress() < split_addr);
        ASSERT(Contains(split_addr));
        ASSERT(Common::IsAligned(split_addr, PageSize));

        MemoryBlock block;
        block.addr = addr;
        block.num_pages = (split_addr - GetAddress()) / PageSize;
        block.state = state;
        block.ipc_lock_count = ipc_lock_count;
        block.device_use_count = device_use_count;
        block.perm = perm;
        block.original_perm = original_perm;
        block.attribute = attribute;

        addr = split_addr;
        num_pages -= block.num_pages;

        return block;
    }
};
static_assert(std::is_trivially_destructible<MemoryBlock>::value);

} // namespace Kernel::Memory
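
MemoryBlock::Split() above cuts a block at a page-aligned address, returning the front part and re-basing the remainder in place. A standalone sketch (not from this commit) of just that address/page bookkeeping, with the state, permission, and attribute copying omitted:

#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000;

struct Block {
    std::uint64_t addr;
    std::size_t num_pages;

    // Return the pages in front of `split_addr` as a new block; this block
    // keeps the tail and is re-based at the split address.
    Block Split(std::uint64_t split_addr) {
        Block front{addr, (split_addr - addr) / PageSize};
        num_pages -= front.num_pages;
        addr = split_addr;
        return front;
    }
};

int main() {
    Block block{0x10000, 8}; // covers [0x10000, 0x18000)
    const Block front = block.Split(0x13000);
    // front: [0x10000, 0x13000) = 3 pages; block: [0x13000, 0x18000) = 5 pages.
    std::printf("front=%llx/%zu block=%llx/%zu\n",
                static_cast<unsigned long long>(front.addr), front.num_pages,
                static_cast<unsigned long long>(block.addr), block.num_pages);
}
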
@@ -1,223 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/memory/memory_block_manager.h"
#include "core/hle/kernel/memory/memory_types.h"

namespace Kernel::Memory {

MemoryBlockManager::MemoryBlockManager(VAddr start_addr, VAddr end_addr)
    : start_addr{start_addr}, end_addr{end_addr} {
    const u64 num_pages{(end_addr - start_addr) / PageSize};
    memory_block_tree.emplace_back(start_addr, num_pages, MemoryState::Free, MemoryPermission::None,
                                   MemoryAttribute::None);
}

MemoryBlockManager::iterator MemoryBlockManager::FindIterator(VAddr addr) {
    auto node{memory_block_tree.begin()};
    while (node != end()) {
        const VAddr end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
        if (node->GetAddress() <= addr && end_addr - 1 >= addr) {
            return node;
        }
        node = std::next(node);
    }
    return end();
}

VAddr MemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
                                       std::size_t num_pages, std::size_t align, std::size_t offset,
                                       std::size_t guard_pages) {
    if (num_pages == 0) {
        return {};
    }

    const VAddr region_end{region_start + region_num_pages * PageSize};
    const VAddr region_last{region_end - 1};
    for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
        const auto info{it->GetMemoryInfo()};
        if (region_last < info.GetAddress()) {
            break;
        }

        if (info.state != MemoryState::Free) {
            continue;
        }

        VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
        area += guard_pages * PageSize;

        const VAddr offset_area{Common::AlignDown(area, align) + offset};
        area = (area <= offset_area) ? offset_area : offset_area + align;

        const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
        const VAddr area_last{area_end - 1};

        if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
            area_last <= info.GetLastAddress()) {
            return area;
        }
    }

    return {};
}

void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
                                MemoryPermission prev_perm, MemoryAttribute prev_attribute,
                                MemoryState state, MemoryPermission perm,
                                MemoryAttribute attribute) {
    const VAddr end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};

    prev_attribute |= MemoryAttribute::IpcAndDeviceMapped;

    while (node != memory_block_tree.end()) {
        MemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

        if (addr < cur_end_addr && cur_addr < end_addr) {
            if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
                node = next_node;
                continue;
            }

            iterator new_node{node};
            if (addr > cur_addr) {
                memory_block_tree.insert(node, block->Split(addr));
            }

            if (end_addr < cur_end_addr) {
                new_node = memory_block_tree.insert(node, block->Split(end_addr));
            }

            new_node->Update(state, perm, attribute);

            MergeAdjacent(new_node, next_node);
        }

        if (cur_end_addr - 1 >= end_addr - 1) {
            break;
        }

        node = next_node;
    }
}

void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState state,
                                MemoryPermission perm, MemoryAttribute attribute) {
    const VAddr end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};

    while (node != memory_block_tree.end()) {
        MemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

        if (addr < cur_end_addr && cur_addr < end_addr) {
            iterator new_node{node};

            if (addr > cur_addr) {
                memory_block_tree.insert(node, block->Split(addr));
            }

            if (end_addr < cur_end_addr) {
                new_node = memory_block_tree.insert(node, block->Split(end_addr));
            }

            new_node->Update(state, perm, attribute);

            MergeAdjacent(new_node, next_node);
        }

        if (cur_end_addr - 1 >= end_addr - 1) {
            break;
        }

        node = next_node;
    }
}

void MemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
                                    MemoryPermission perm) {
    const VAddr end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};

    while (node != memory_block_tree.end()) {
        MemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

        if (addr < cur_end_addr && cur_addr < end_addr) {
            iterator new_node{node};

            if (addr > cur_addr) {
                memory_block_tree.insert(node, block->Split(addr));
            }

            if (end_addr < cur_end_addr) {
                new_node = memory_block_tree.insert(node, block->Split(end_addr));
            }

            lock_func(new_node, perm);

            MergeAdjacent(new_node, next_node);
        }

        if (cur_end_addr - 1 >= end_addr - 1) {
            break;
        }

        node = next_node;
    }
}

void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
    const_iterator it{FindIterator(start)};
    MemoryInfo info{};
    do {
        info = it->GetMemoryInfo();
        func(info);
        it = std::next(it);
    } while (info.addr + info.size - 1 < end - 1 && it != cend());
}

void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
    MemoryBlock* block{&(*it)};

    auto EraseIt = [&](const iterator it_to_erase) {
        if (next_it == it_to_erase) {
            next_it = std::next(next_it);
        }
        memory_block_tree.erase(it_to_erase);
    };

    if (it != memory_block_tree.begin()) {
        MemoryBlock* prev{&(*std::prev(it))};

        if (block->HasSameProperties(*prev)) {
            const iterator prev_it{std::prev(it)};

            prev->Add(block->GetNumPages());
            EraseIt(it);

            it = prev_it;
            block = prev;
        }
    }

    if (it != cend()) {
        const MemoryBlock* const next{&(*std::next(it))};

        if (block->HasSameProperties(*next)) {
            block->Add(next->GetNumPages());
            EraseIt(std::next(it));
        }
    }
}

} // namespace Kernel::Memory
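Both Update() overloads above share one shape: split the first and last blocks that straddle the target range so the range lands exactly on block boundaries, rewrite the blocks in between, and let MergeAdjacent() re-coalesce neighbors whose state, permissions, and attributes match exactly. A hypothetical usage sketch (the addresses and the chosen state are made up for illustration):

    // The tree starts as a single Free block covering the whole range;
    // this call re-splits it into [Free][Normal, 16 pages][Free].
    MemoryBlockManager manager{0x10000000, 0x20000000};
    manager.Update(0x10004000, 16, MemoryState::Normal, MemoryPermission::ReadAndWrite);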
@@ -1,66 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <functional>
#include <list>

#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_block.h"

namespace Kernel::Memory {

class MemoryBlockManager final {
public:
    using MemoryBlockTree = std::list<MemoryBlock>;
    using iterator = MemoryBlockTree::iterator;
    using const_iterator = MemoryBlockTree::const_iterator;

public:
    MemoryBlockManager(VAddr start_addr, VAddr end_addr);

    iterator end() {
        return memory_block_tree.end();
    }
    const_iterator end() const {
        return memory_block_tree.end();
    }
    const_iterator cend() const {
        return memory_block_tree.cend();
    }

    iterator FindIterator(VAddr addr);

    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
                       std::size_t align, std::size_t offset, std::size_t guard_pages);

    void Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
                MemoryPermission prev_perm, MemoryAttribute prev_attribute, MemoryState state,
                MemoryPermission perm, MemoryAttribute attribute);

    void Update(VAddr addr, std::size_t num_pages, MemoryState state,
                MemoryPermission perm = MemoryPermission::None,
                MemoryAttribute attribute = MemoryAttribute::None);

    using LockFunc = std::function<void(iterator, MemoryPermission)>;
    void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, MemoryPermission perm);

    using IterateFunc = std::function<void(const MemoryInfo&)>;
    void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);

    MemoryBlock& FindBlock(VAddr addr) {
        return *FindIterator(addr);
    }

private:
    void MergeAdjacent(iterator it, iterator& next_it);

    [[maybe_unused]] const VAddr start_addr;
    [[maybe_unused]] const VAddr end_addr;

    MemoryBlockTree memory_block_tree;
};

} // namespace Kernel::Memory
@@ -1,90 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"
#include "core/device_memory.h"

namespace Kernel::Memory {

constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024;
constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39;
constexpr std::size_t KernelPhysicalAddressSpaceWidth = 1ULL << 48;
constexpr std::size_t KernelVirtualAddressSpaceBase = 0ULL - KernelVirtualAddressSpaceWidth;
constexpr std::size_t KernelVirtualAddressSpaceEnd =
    KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);
constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1;
constexpr std::size_t KernelVirtualAddressSpaceSize =
    KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase;

constexpr bool IsKernelAddressKey(VAddr key) {
    return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
}

constexpr bool IsKernelAddress(VAddr address) {
    return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
}

class MemoryRegion final {
    friend class MemoryLayout;

public:
    constexpr PAddr StartAddress() const {
        return start_address;
    }

    constexpr PAddr EndAddress() const {
        return end_address;
    }

private:
    constexpr MemoryRegion() = default;
    constexpr MemoryRegion(PAddr start_address, PAddr end_address)
        : start_address{start_address}, end_address{end_address} {}

    const PAddr start_address{};
    const PAddr end_address{};
};

class MemoryLayout final {
public:
    constexpr const MemoryRegion& Application() const {
        return application;
    }

    constexpr const MemoryRegion& Applet() const {
        return applet;
    }

    constexpr const MemoryRegion& System() const {
        return system;
    }

    static constexpr MemoryLayout GetDefaultLayout() {
        constexpr std::size_t application_size{0xcd500000};
        constexpr std::size_t applet_size{0x1fb00000};
        constexpr PAddr application_start_address{Core::DramMemoryMap::End - application_size};
        constexpr PAddr application_end_address{Core::DramMemoryMap::End};
        constexpr PAddr applet_start_address{application_start_address - applet_size};
        constexpr PAddr applet_end_address{applet_start_address + applet_size};
        constexpr PAddr system_start_address{Core::DramMemoryMap::SlabHeapEnd};
        constexpr PAddr system_end_address{applet_start_address};
        return {application_start_address, application_end_address, applet_start_address,
                applet_end_address,        system_start_address,    system_end_address};
    }

private:
    // Note: each second parameter is the region's *end address* (previously
    // misleadingly named *_size), matching what GetDefaultLayout() passes and
    // what MemoryRegion's constructor expects.
    constexpr MemoryLayout(PAddr application_start_address, std::size_t application_end_address,
                           PAddr applet_start_address, std::size_t applet_end_address,
                           PAddr system_start_address, std::size_t system_end_address)
        : application{application_start_address, application_end_address},
          applet{applet_start_address, applet_end_address},
          system{system_start_address, system_end_address} {}

    const MemoryRegion application;
    const MemoryRegion applet;
    const MemoryRegion system;
};

} // namespace Kernel::Memory
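GetDefaultLayout() packs the system, applet, and application regions back-to-back below the end of DRAM. A standalone sketch of the same arithmetic with an assumed DramMemoryMap::End value (the real constant lives in core/device_memory.h and is not reproduced here):

    #include <cstdint>

    constexpr std::uint64_t dram_end = 0x1'0000'0000; // assumed, for illustration only
    constexpr std::uint64_t application_size = 0xcd500000;
    constexpr std::uint64_t applet_size = 0x1fb00000;
    constexpr std::uint64_t application_start = dram_end - application_size;
    constexpr std::uint64_t applet_start = application_start - applet_size;

    // The applet and application regions tile the space with no gap or overlap.
    static_assert(applet_start + applet_size == application_start);
    static_assert(application_start + application_size == dram_end);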
@@ -1,175 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/kernel/memory/page_linked_list.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel::Memory {

std::size_t MemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
    const auto size{end_address - start_address};

    // Calculate metadata sizes
    const auto ref_count_size{(size / PageSize) * sizeof(u16)};
    const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)};
    const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)};
    const auto page_heap_size{PageHeap::CalculateMetadataOverheadSize(size)};
    const auto total_metadata_size{manager_size + page_heap_size};
    ASSERT(manager_size <= total_metadata_size);
    ASSERT(Common::IsAligned(total_metadata_size, PageSize));

    // Setup region
    pool = new_pool;

    // Initialize the manager's KPageHeap
    heap.Initialize(start_address, size, page_heap_size);

    // Free the memory to the heap
    heap.Free(start_address, size / PageSize);

    // Update the heap's used size
    heap.UpdateUsedSize();

    return total_metadata_size;
}

void MemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
    ASSERT(pool < Pool::Count);
    managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address);
}

VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
                                        Direction dir) {
    // Early return if we're allocating no pages
    if (num_pages == 0) {
        return {};
    }

    // Lock the pool that we're allocating from
    const auto pool_index{static_cast<std::size_t>(pool)};
    std::lock_guard lock{pool_locks[pool_index]};

    // Choose a heap based on our page size request
    const s32 heap_index{PageHeap::GetAlignedBlockIndex(num_pages, align_pages)};

    // Loop, trying to iterate from each block
    // TODO (bunnei): Support multiple managers
    Impl& chosen_manager{managers[pool_index]};
    VAddr allocated_block{chosen_manager.AllocateBlock(heap_index)};

    // If we failed to allocate, quit now
    if (!allocated_block) {
        return {};
    }

    // If we allocated more than we need, free some
    const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
    if (allocated_pages > num_pages) {
        chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
    }

    return allocated_block;
}

ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                                   Direction dir) {
    ASSERT(page_list.GetNumPages() == 0);

    // Early return if we're allocating no pages
    if (num_pages == 0) {
        return RESULT_SUCCESS;
    }

    // Lock the pool that we're allocating from
    const auto pool_index{static_cast<std::size_t>(pool)};
    std::lock_guard lock{pool_locks[pool_index]};

    // Choose a heap based on our page size request
    const s32 heap_index{PageHeap::GetBlockIndex(num_pages)};
    if (heap_index < 0) {
        return ResultOutOfMemory;
    }

    // TODO (bunnei): Support multiple managers
    Impl& chosen_manager{managers[pool_index]};

    // Ensure that we don't leave anything un-freed
    auto group_guard = detail::ScopeExit([&] {
        for (const auto& it : page_list.Nodes()) {
            const auto min_num_pages{std::min<size_t>(
                it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
            chosen_manager.Free(it.GetAddress(), min_num_pages);
        }
    });

    // Keep allocating until we've allocated all our pages
    for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
        const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)};

        while (num_pages >= pages_per_alloc) {
            // Allocate a block
            VAddr allocated_block{chosen_manager.AllocateBlock(index)};
            if (!allocated_block) {
                break;
            }

            // Safely add it to our group
            {
                auto block_guard = detail::ScopeExit(
                    [&] { chosen_manager.Free(allocated_block, pages_per_alloc); });

                if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)};
                    result.IsError()) {
                    return result;
                }

                block_guard.Cancel();
            }

            num_pages -= pages_per_alloc;
        }
    }

    // Only succeed if we allocated as many pages as we wanted
    if (num_pages) {
        return ResultOutOfMemory;
    }

    // We succeeded!
    group_guard.Cancel();
    return RESULT_SUCCESS;
}

ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                               Direction dir) {
    // Early return if we're freeing no pages
    if (!num_pages) {
        return RESULT_SUCCESS;
    }

    // Lock the pool that we're freeing from
    const auto pool_index{static_cast<std::size_t>(pool)};
    std::lock_guard lock{pool_locks[pool_index]};

    // TODO (bunnei): Support multiple managers
    Impl& chosen_manager{managers[pool_index]};

    // Free all of the pages
    for (const auto& it : page_list.Nodes()) {
        const auto min_num_pages{std::min<size_t>(
            it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
        chosen_manager.Free(it.GetAddress(), min_num_pages);
    }

    return RESULT_SUCCESS;
}

} // namespace Kernel::Memory
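Allocate() above leans on cancellable scope guards: every early-error path returns partially allocated blocks to the heap, and only the success path cancels the cleanup. A minimal standalone sketch of that guard pattern (an assumed stand-in, not the project's actual detail::ScopeExit helper):

    #include <utility>

    template <typename F>
    class ScopeGuard {
    public:
        explicit ScopeGuard(F f) : func{std::move(f)} {}
        ~ScopeGuard() {
            if (active) {
                func(); // runs the cleanup unless cancelled
            }
        }
        void Cancel() {
            active = false; // success path: keep the allocation
        }

    private:
        F func;
        bool active{true};
    };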
@@ -1,96 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <mutex>

#include "common/common_types.h"
#include "core/hle/kernel/memory/page_heap.h"
#include "core/hle/result.h"

namespace Kernel::Memory {

class PageLinkedList;

class MemoryManager final : NonCopyable {
public:
    enum class Pool : u32 {
        Application = 0,
        Applet = 1,
        System = 2,
        SystemNonSecure = 3,

        Count,

        Shift = 4,
        Mask = (0xF << Shift),
    };

    enum class Direction : u32 {
        FromFront = 0,
        FromBack = 1,

        Shift = 0,
        Mask = (0xF << Shift),
    };

    MemoryManager() = default;

    constexpr std::size_t GetSize(Pool pool) const {
        return managers[static_cast<std::size_t>(pool)].GetSize();
    }

    void InitializeManager(Pool pool, u64 start_address, u64 end_address);
    VAddr AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
                             Direction dir = Direction::FromFront);
    ResultCode Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                        Direction dir = Direction::FromFront);
    ResultCode Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                    Direction dir = Direction::FromFront);

    static constexpr std::size_t MaxManagerCount = 10;

private:
    class Impl final : NonCopyable {
    private:
        using RefCount = u16;

    private:
        PageHeap heap;
        Pool pool{};

    public:
        Impl() = default;

        std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address);

        VAddr AllocateBlock(s32 index) {
            return heap.AllocateBlock(index);
        }

        void Free(VAddr addr, std::size_t num_pages) {
            heap.Free(addr, num_pages);
        }

        constexpr std::size_t GetSize() const {
            return heap.GetSize();
        }

        constexpr VAddr GetAddress() const {
            return heap.GetAddress();
        }

        constexpr VAddr GetEndAddress() const {
            return heap.GetEndAddress();
        }
    };

private:
    std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks;
    std::array<Impl, MaxManagerCount> managers;
};

} // namespace Kernel::Memory
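The Shift/Mask enumerators above suggest Pool and Direction are meant to be packed into a single option word, as the Horizon kernel does for memory pool options in its svc interface. A sketch of the unpacking under that assumption (illustrative helpers, not code from this file):

    #include <cstdint>

    // Assumed packing: Direction in bits 0-3, Pool in bits 4-7.
    inline std::uint32_t UnpackPool(std::uint32_t option) {
        return (option & 0xF0u) >> 4; // (option & Pool::Mask) >> Pool::Shift
    }

    inline std::uint32_t UnpackDirection(std::uint32_t option) {
        return option & 0xFu; // (option & Direction::Mask) >> Direction::Shift
    }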
@@ -1,18 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>

#include "common/common_types.h"

namespace Kernel::Memory {

constexpr std::size_t PageBits{12};
constexpr std::size_t PageSize{1 << PageBits};

using Page = std::array<u8, PageSize>;

} // namespace Kernel::Memory
@@ -1,119 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#include "core/core.h"
#include "core/hle/kernel/memory/page_heap.h"
#include "core/memory.h"

namespace Kernel::Memory {

void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
    // Check our assumptions
    ASSERT(Common::IsAligned(address, PageSize));
    ASSERT(Common::IsAligned(size, PageSize));

    // Set our members
    heap_address = address;
    heap_size = size;

    // Setup bitmaps
    metadata.resize(metadata_size / sizeof(u64));
    u64* cur_bitmap_storage{metadata.data()};
    for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
        const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
        const std::size_t next_block_shift{
            (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
        cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift,
                                                  next_block_shift, cur_bitmap_storage);
    }
}

VAddr PageHeap::AllocateBlock(s32 index) {
    const std::size_t needed_size{blocks[index].GetSize()};

    for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
        if (const VAddr addr{blocks[i].PopBlock()}; addr) {
            if (const std::size_t allocated_size{blocks[i].GetSize()};
                allocated_size > needed_size) {
                Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
            }
            return addr;
        }
    }

    return 0;
}

void PageHeap::FreeBlock(VAddr block, s32 index) {
    do {
        block = blocks[index++].PushBlock(block);
    } while (block != 0);
}

void PageHeap::Free(VAddr addr, std::size_t num_pages) {
    // Freeing no pages is a no-op
    if (num_pages == 0) {
        return;
    }

    // Find the largest block size that we can free, and free as many as possible
    s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1};
    const VAddr start{addr};
    const VAddr end{(num_pages * PageSize) + addr};
    VAddr before_start{start};
    VAddr before_end{start};
    VAddr after_start{end};
    VAddr after_end{end};
    while (big_index >= 0) {
        const std::size_t block_size{blocks[big_index].GetSize()};
        const VAddr big_start{Common::AlignUp(start, block_size)};
        const VAddr big_end{Common::AlignDown(end, block_size)};
        if (big_start < big_end) {
            // Free as many big blocks as we can
            for (auto block{big_start}; block < big_end; block += block_size) {
                FreeBlock(block, big_index);
            }
            before_end = big_start;
            after_start = big_end;
            break;
        }
        big_index--;
    }
    ASSERT(big_index >= 0);

    // Free space before the big blocks
    for (s32 i{big_index - 1}; i >= 0; i--) {
        const std::size_t block_size{blocks[i].GetSize()};
        while (before_start + block_size <= before_end) {
            before_end -= block_size;
            FreeBlock(before_end, i);
        }
    }

    // Free space after the big blocks
    for (s32 i{big_index - 1}; i >= 0; i--) {
        const std::size_t block_size{blocks[i].GetSize()};
        while (after_start + block_size <= after_end) {
            FreeBlock(after_start, i);
            after_start += block_size;
        }
    }
}

std::size_t PageHeap::CalculateMetadataOverheadSize(std::size_t region_size) {
    std::size_t overhead_size = 0;
    for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
        const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
        const std::size_t next_block_shift{
            (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
        overhead_size += PageHeap::Block::CalculateMetadataOverheadSize(
            region_size, cur_block_shift, next_block_shift);
    }
    return Common::AlignUp(overhead_size, PageSize);
}

} // namespace Kernel::Memory
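Free() above splits a run of pages into an aligned middle freed at the largest fitting block size, plus an unaligned head and tail freed at progressively smaller sizes. A worked example with assumed 4 KiB and 64 KiB block sizes: freeing [0x3000, 0x25000) frees [0x10000, 0x20000) as one 64 KiB block, then frees [0x3000, 0x10000) as thirteen 4 KiB blocks walking before_end backwards, then frees [0x20000, 0x25000) as five 4 KiB blocks walking after_start forwards.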
@@ -1,370 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include <algorithm>
#include <array>
#include <bit>
#include <vector>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"

namespace Kernel::Memory {

class PageHeap final : NonCopyable {
public:
    static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
        const auto target_pages{std::max(num_pages, align_pages)};
        for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
            if (target_pages <=
                (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                return static_cast<s32>(i);
            }
        }
        return -1;
    }

    static constexpr s32 GetBlockIndex(std::size_t num_pages) {
        for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
            if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                return i;
            }
        }
        return -1;
    }

    static constexpr std::size_t GetBlockSize(std::size_t index) {
        return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
    }

    static constexpr std::size_t GetBlockNumPages(std::size_t index) {
        return GetBlockSize(index) / PageSize;
    }

private:
    static constexpr std::size_t NumMemoryBlockPageShifts{7};
    static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
        0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
    };

    class Block final : NonCopyable {
    private:
        class Bitmap final : NonCopyable {
        public:
            static constexpr std::size_t MaxDepth{4};

        private:
            std::array<u64*, MaxDepth> bit_storages{};
            std::size_t num_bits{};
            std::size_t used_depths{};

        public:
            constexpr Bitmap() = default;

            constexpr std::size_t GetNumBits() const {
                return num_bits;
            }
            constexpr s32 GetHighestDepthIndex() const {
                return static_cast<s32>(used_depths) - 1;
            }

            constexpr u64* Initialize(u64* storage, std::size_t size) {
                // Initially, everything is un-set
                num_bits = 0;

                // Calculate the needed bitmap depth
                used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
                ASSERT(used_depths <= MaxDepth);

                // Set the bitmap pointers
                for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
                    bit_storages[depth] = storage;
                    size = Common::AlignUp(size, 64) / 64;
                    storage += size;
                }

                return storage;
            }

            s64 FindFreeBlock() const {
                uintptr_t offset{};
                s32 depth{};

                do {
                    const u64 v{bit_storages[depth][offset]};
                    if (v == 0) {
                        // Non-zero depth indicates that a previous level had a free block
                        ASSERT(depth == 0);
                        return -1;
                    }
                    offset = offset * 64 + static_cast<u32>(std::countr_zero(v));
                    ++depth;
                } while (depth < static_cast<s32>(used_depths));

                return static_cast<s64>(offset);
            }

            constexpr void SetBit(std::size_t offset) {
                SetBit(GetHighestDepthIndex(), offset);
                num_bits++;
            }

            constexpr void ClearBit(std::size_t offset) {
                ClearBit(GetHighestDepthIndex(), offset);
                num_bits--;
            }

            constexpr bool ClearRange(std::size_t offset, std::size_t count) {
                const s32 depth{GetHighestDepthIndex()};
                const auto bit_ind{offset / 64};
                u64* bits{bit_storages[depth]};
                if (count < 64) {
                    const auto shift{offset % 64};
                    ASSERT(shift + count <= 64);
                    // Check that all the bits are set
                    const u64 mask{((1ULL << count) - 1) << shift};
                    u64 v{bits[bit_ind]};
                    if ((v & mask) != mask) {
                        return false;
                    }

                    // Clear the bits
                    v &= ~mask;
                    bits[bit_ind] = v;
                    if (v == 0) {
                        ClearBit(depth - 1, bit_ind);
                    }
                } else {
                    ASSERT(offset % 64 == 0);
                    ASSERT(count % 64 == 0);
                    // Check that all the bits are set
                    std::size_t remaining{count};
                    std::size_t i = 0;
                    do {
                        if (bits[bit_ind + i++] != ~u64(0)) {
                            return false;
                        }
                        remaining -= 64;
                    } while (remaining > 0);

                    // Clear the bits
                    remaining = count;
                    i = 0;
                    do {
                        bits[bit_ind + i] = 0;
                        ClearBit(depth - 1, bit_ind + i);
                        i++;
                        remaining -= 64;
                    } while (remaining > 0);
                }

                num_bits -= count;
                return true;
            }

        private:
            constexpr void SetBit(s32 depth, std::size_t offset) {
                while (depth >= 0) {
                    const auto ind{offset / 64};
                    const auto which{offset % 64};
                    const u64 mask{1ULL << which};

                    u64* bit{std::addressof(bit_storages[depth][ind])};
                    const u64 v{*bit};
                    ASSERT((v & mask) == 0);
                    *bit = v | mask;
                    if (v) {
                        break;
                    }
                    offset = ind;
                    depth--;
                }
            }

            constexpr void ClearBit(s32 depth, std::size_t offset) {
                while (depth >= 0) {
                    const auto ind{offset / 64};
                    const auto which{offset % 64};
                    const u64 mask{1ULL << which};

                    u64* bit{std::addressof(bit_storages[depth][ind])};
                    u64 v{*bit};
                    ASSERT((v & mask) != 0);
                    v &= ~mask;
                    *bit = v;
                    if (v) {
                        break;
                    }
                    offset = ind;
                    depth--;
                }
            }

        private:
            static constexpr s32 GetRequiredDepth(std::size_t region_size) {
                s32 depth = 0;
                while (true) {
                    region_size /= 64;
                    depth++;
                    if (region_size == 0) {
                        return depth;
                    }
                }
            }

        public:
            static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size) {
                std::size_t overhead_bits = 0;
                for (s32 depth{GetRequiredDepth(region_size) - 1}; depth >= 0; depth--) {
                    region_size = Common::AlignUp(region_size, 64) / 64;
                    overhead_bits += region_size;
                }
                return overhead_bits * sizeof(u64);
            }
        };

    private:
        Bitmap bitmap;
        VAddr heap_address{};
        uintptr_t end_offset{};
        std::size_t block_shift{};
        std::size_t next_block_shift{};

    public:
        constexpr Block() = default;

        constexpr std::size_t GetShift() const {
            return block_shift;
        }
        constexpr std::size_t GetNextShift() const {
            return next_block_shift;
        }
        constexpr std::size_t GetSize() const {
            return static_cast<std::size_t>(1) << GetShift();
        }
        constexpr std::size_t GetNumPages() const {
            return GetSize() / PageSize;
        }
        constexpr std::size_t GetNumFreeBlocks() const {
            return bitmap.GetNumBits();
        }
        constexpr std::size_t GetNumFreePages() const {
            return GetNumFreeBlocks() * GetNumPages();
        }

        constexpr u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
                                  u64* bit_storage) {
            // Set shifts
            block_shift = bs;
            next_block_shift = nbs;

            // Align up the address
            VAddr end{addr + size};
            const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
                                                     : (1ULL << block_shift)};
            addr = Common::AlignDown(addr, align);
            end = Common::AlignUp(end, align);

            heap_address = addr;
            end_offset = (end - addr) / (1ULL << block_shift);
            return bitmap.Initialize(bit_storage, end_offset);
        }

        constexpr VAddr PushBlock(VAddr address) {
            // Set the bit for the free block
            std::size_t offset{(address - heap_address) >> GetShift()};
            bitmap.SetBit(offset);

            // If we have a next shift, try to clear the blocks below and return the address
            if (GetNextShift()) {
                const auto diff{1ULL << (GetNextShift() - GetShift())};
                offset = Common::AlignDown(offset, diff);
                if (bitmap.ClearRange(offset, diff)) {
                    return heap_address + (offset << GetShift());
                }
            }

            // We couldn't coalesce, or we're already as big as possible
            return 0;
        }

        VAddr PopBlock() {
            // Find a free block
            const s64 soffset{bitmap.FindFreeBlock()};
            if (soffset < 0) {
                return 0;
            }
            const auto offset{static_cast<std::size_t>(soffset)};

            // Update our tracking and return it
            bitmap.ClearBit(offset);
            return heap_address + (offset << GetShift());
        }

    public:
        static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size,
                                                                   std::size_t cur_block_shift,
                                                                   std::size_t next_block_shift) {
            const auto cur_block_size{(1ULL << cur_block_shift)};
            const auto next_block_size{(1ULL << next_block_shift)};
            const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
            return Bitmap::CalculateMetadataOverheadSize(
                (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
        }
    };

public:
    PageHeap() = default;

    constexpr VAddr GetAddress() const {
        return heap_address;
    }
    constexpr std::size_t GetSize() const {
        return heap_size;
    }
    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();
    }
    constexpr std::size_t GetPageOffset(VAddr block) const {
        return (block - GetAddress()) / PageSize;
    }

    void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
    VAddr AllocateBlock(s32 index);
    void Free(VAddr addr, std::size_t num_pages);

    void UpdateUsedSize() {
        used_size = heap_size - (GetNumFreePages() * PageSize);
    }

    static std::size_t CalculateMetadataOverheadSize(std::size_t region_size);

private:
    constexpr std::size_t GetNumFreePages() const {
        std::size_t num_free{};

        for (const auto& block : blocks) {
            num_free += block.GetNumFreePages();
        }

        return num_free;
    }

    void FreeBlock(VAddr block, s32 index);

    VAddr heap_address{};
    std::size_t heap_size{};
    std::size_t used_size{};
    std::array<Block, NumMemoryBlockPageShifts> blocks{};
    std::vector<u64> metadata;
};

} // namespace Kernel::Memory
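FindFreeBlock() above walks a hierarchical bitmap in which each bit one level up summarizes 64 bits below it, so a free block is found in at most MaxDepth countr_zero steps. A standalone two-level sketch of that descent (fixed depth of two and an assumed layout, unlike the generalized kernel version):

    #include <array>
    #include <bit>
    #include <cstdint>

    // Invariant: bit i of summary is set exactly when leaves[i] != 0.
    inline long FindFreeBit(std::uint64_t summary, const std::array<std::uint64_t, 64>& leaves) {
        if (summary == 0) {
            return -1; // nothing free anywhere
        }
        const unsigned word = static_cast<unsigned>(std::countr_zero(summary));
        const unsigned bit = static_cast<unsigned>(std::countr_zero(leaves[word]));
        return static_cast<long>(word) * 64 + bit;
    }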
@@ -1,92 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <list>

#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/result.h"

namespace Kernel::Memory {

class PageLinkedList final {
public:
    class Node final {
    public:
        constexpr Node(u64 addr, std::size_t num_pages) : addr{addr}, num_pages{num_pages} {}

        constexpr u64 GetAddress() const {
            return addr;
        }

        constexpr std::size_t GetNumPages() const {
            return num_pages;
        }

    private:
        u64 addr{};
        std::size_t num_pages{};
    };

public:
    PageLinkedList() = default;
    PageLinkedList(u64 address, u64 num_pages) {
        ASSERT(AddBlock(address, num_pages).IsSuccess());
    }

    constexpr std::list<Node>& Nodes() {
        return nodes;
    }

    constexpr const std::list<Node>& Nodes() const {
        return nodes;
    }

    std::size_t GetNumPages() const {
        std::size_t num_pages = 0;
        for (const Node& node : nodes) {
            num_pages += node.GetNumPages();
        }
        return num_pages;
    }

    bool IsEqual(PageLinkedList& other) const {
        auto this_node = nodes.begin();
        auto other_node = other.nodes.begin();
        while (this_node != nodes.end() && other_node != other.nodes.end()) {
            if (this_node->GetAddress() != other_node->GetAddress() ||
                this_node->GetNumPages() != other_node->GetNumPages()) {
                return false;
            }
            this_node = std::next(this_node);
            other_node = std::next(other_node);
        }

        return this_node == nodes.end() && other_node == other.nodes.end();
    }

    ResultCode AddBlock(u64 address, u64 num_pages) {
        if (!num_pages) {
            return RESULT_SUCCESS;
        }
        if (!nodes.empty()) {
            const auto node = nodes.back();
            if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
                address = node.GetAddress();
                num_pages += node.GetNumPages();
                nodes.pop_back();
            }
        }
        nodes.push_back({address, num_pages});
        return RESULT_SUCCESS;
    }

private:
    std::list<Node> nodes;
};

} // namespace Kernel::Memory
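AddBlock() above merges a new block into the tail node when it is physically contiguous with it, so runs of adjacent pages stay as a single node. A hypothetical usage sketch (the addresses are illustrative):

    PageLinkedList list;
    list.AddBlock(0x1000, 2); // list: [0x1000, 2 pages]
    list.AddBlock(0x3000, 3); // contiguous (0x1000 + 2 * 0x1000 == 0x3000) -> [0x1000, 5 pages]
    list.AddBlock(0x9000, 1); // not contiguous -> new node [0x9000, 1 page]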
File diff suppressed because it is too large
@@ -1,277 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <mutex>

#include "common/common_types.h"
#include "common/page_table.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/result.h"

namespace Core {
class System;
}

namespace Kernel::Memory {

class MemoryBlockManager;

class PageTable final : NonCopyable {
public:
    explicit PageTable(Core::System& system);

    ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
                                    VAddr code_addr, std::size_t code_size,
                                    Memory::MemoryManager::Pool pool);
    ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, MemoryState state,
                              MemoryPermission perm);
    ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
    ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
    ResultCode UnmapMemory(VAddr addr, std::size_t size);
    ResultCode Map(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state,
                        MemoryPermission perm);
    ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, MemoryPermission perm);
    MemoryInfo QueryInfo(VAddr addr);
    ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm);
    ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
    ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask,
                                  MemoryAttribute value);
    ResultCode SetHeapCapacity(std::size_t new_heap_capacity);
    ResultVal<VAddr> SetHeapSize(std::size_t size);
    ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
                                          bool is_map_only, VAddr region_start,
                                          std::size_t region_num_pages, MemoryState state,
                                          MemoryPermission perm, PAddr map_addr = 0);
    ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
    ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);

    Common::PageTable& PageTableImpl() {
        return page_table_impl;
    }

    const Common::PageTable& PageTableImpl() const {
        return page_table_impl;
    }

private:
    enum class OperationType : u32 {
        Map,
        MapGroup,
        Unmap,
        ChangePermissions,
        ChangePermissionsAndRefresh,
    };

    static constexpr MemoryAttribute DefaultMemoryIgnoreAttr =
        MemoryAttribute::DontCareMask | MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared;

    ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
    ResultCode MapPages(VAddr addr, const PageLinkedList& page_linked_list, MemoryPermission perm);
    void MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start, VAddr end);
    bool IsRegionMapped(VAddr address, u64 size);
    bool IsRegionContiguous(VAddr addr, u64 size) const;
    void AddRegionToPages(VAddr start, std::size_t num_pages, PageLinkedList& page_linked_list);
    MemoryInfo QueryInfoImpl(VAddr addr);
    VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
                                std::size_t align);
    ResultCode Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group,
                       OperationType operation);
    ResultCode Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm,
                       OperationType operation, PAddr map_addr = 0);
    constexpr VAddr GetRegionAddress(MemoryState state) const;
    constexpr std::size_t GetRegionSize(MemoryState state) const;
    constexpr bool CanContain(VAddr addr, std::size_t size, MemoryState state) const;

    constexpr ResultCode CheckMemoryState(const MemoryInfo& info, MemoryState state_mask,
                                          MemoryState state, MemoryPermission perm_mask,
                                          MemoryPermission perm, MemoryAttribute attr_mask,
                                          MemoryAttribute attr) const;
    ResultCode CheckMemoryState(MemoryState* out_state, MemoryPermission* out_perm,
                                MemoryAttribute* out_attr, VAddr addr, std::size_t size,
                                MemoryState state_mask, MemoryState state,
                                MemoryPermission perm_mask, MemoryPermission perm,
                                MemoryAttribute attr_mask, MemoryAttribute attr,
                                MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr);
    ResultCode CheckMemoryState(VAddr addr, std::size_t size, MemoryState state_mask,
                                MemoryState state, MemoryPermission perm_mask,
                                MemoryPermission perm, MemoryAttribute attr_mask,
                                MemoryAttribute attr,
                                MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) {
        return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask,
                                perm, attr_mask, attr, ignore_attr);
    }

    std::recursive_mutex page_table_lock;
    std::unique_ptr<MemoryBlockManager> block_manager;

public:
    constexpr VAddr GetAddressSpaceStart() const {
        return address_space_start;
    }
    constexpr VAddr GetAddressSpaceEnd() const {
        return address_space_end;
    }
    constexpr std::size_t GetAddressSpaceSize() const {
        return address_space_end - address_space_start;
    }
    constexpr VAddr GetHeapRegionStart() const {
        return heap_region_start;
    }
    constexpr VAddr GetHeapRegionEnd() const {
        return heap_region_end;
    }
    constexpr std::size_t GetHeapRegionSize() const {
        return heap_region_end - heap_region_start;
    }
    constexpr VAddr GetAliasRegionStart() const {
        return alias_region_start;
    }
    constexpr VAddr GetAliasRegionEnd() const {
        return alias_region_end;
    }
    constexpr std::size_t GetAliasRegionSize() const {
        return alias_region_end - alias_region_start;
    }
    constexpr VAddr GetStackRegionStart() const {
        return stack_region_start;
    }
    constexpr VAddr GetStackRegionEnd() const {
        return stack_region_end;
    }
    constexpr std::size_t GetStackRegionSize() const {
        return stack_region_end - stack_region_start;
    }
    constexpr VAddr GetKernelMapRegionStart() const {
        return kernel_map_region_start;
    }
    constexpr VAddr GetKernelMapRegionEnd() const {
        return kernel_map_region_end;
    }
    constexpr VAddr GetCodeRegionStart() const {
        return code_region_start;
    }
    constexpr VAddr GetCodeRegionEnd() const {
        return code_region_end;
    }
    constexpr VAddr GetAliasCodeRegionStart() const {
        return alias_code_region_start;
    }
    constexpr VAddr GetAliasCodeRegionSize() const {
        return alias_code_region_end - alias_code_region_start;
    }
    constexpr std::size_t GetAddressSpaceWidth() const {
        return address_space_width;
    }
    constexpr std::size_t GetHeapSize() const {
        return current_heap_addr - heap_region_start;
    }
    constexpr std::size_t GetTotalHeapSize() const {
        return GetHeapSize() + physical_memory_usage;
    }
    constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
        return address_space_start <= address && address + size - 1 <= address_space_end - 1;
    }
    constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const {
        return alias_region_start > address || address + size - 1 > alias_region_end - 1;
    }
    constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const {
        return stack_region_start > address || address + size - 1 > stack_region_end - 1;
    }
    constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const {
        return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
    }
    constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const {
        return address + size > heap_region_start && heap_region_end > address;
    }
    constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const {
        return address + size > alias_region_start && alias_region_end > address;
    }
    constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const {
        if (IsInvalidRegion(address, size)) {
            return true;
        }
        if (IsInsideHeapRegion(address, size)) {
            return true;
        }
        if (IsInsideAliasRegion(address, size)) {
            return true;
        }
        return {};
    }
    constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
        return !IsOutsideASLRRegion(address, size);
    }
    constexpr PAddr GetPhysicalAddr(VAddr addr) const {
        return page_table_impl.backing_addr[addr >> Memory::PageBits] + addr;
    }

private:
    constexpr bool Contains(VAddr addr) const {
        return address_space_start <= addr && addr <= address_space_end - 1;
    }
    constexpr bool Contains(VAddr addr, std::size_t size) const {
        return address_space_start <= addr && addr < addr + size &&
               addr + size - 1 <= address_space_end - 1;
    }
    constexpr bool IsKernel() const {
        return is_kernel;
    }
    constexpr bool IsAslrEnabled() const {
        return is_aslr_enabled;
    }

    constexpr std::size_t GetNumGuardPages() const {
        return IsKernel() ? 1 : 4;
    }

    constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
        return (address_space_start <= addr) &&
               (num_pages <= (address_space_end - address_space_start) / PageSize) &&
               (addr + num_pages * PageSize - 1 <= address_space_end - 1);
    }

private:
    VAddr address_space_start{};
    VAddr address_space_end{};
    VAddr heap_region_start{};
    VAddr heap_region_end{};
    VAddr current_heap_end{};
    VAddr alias_region_start{};
    VAddr alias_region_end{};
    VAddr stack_region_start{};
    VAddr stack_region_end{};
    VAddr kernel_map_region_start{};
    VAddr kernel_map_region_end{};
    VAddr code_region_start{};
    VAddr code_region_end{};
    VAddr alias_code_region_start{};
    VAddr alias_code_region_end{};
    VAddr current_heap_addr{};

    std::size_t heap_capacity{};
    std::size_t physical_memory_usage{};
    std::size_t max_heap_size{};
    std::size_t max_physical_memory_size{};
    std::size_t address_space_width{};

    bool is_kernel{};
    bool is_aslr_enabled{};

    MemoryManager::Pool memory_pool{MemoryManager::Pool::Application};

    Common::PageTable page_table_impl;

    Core::System& system;
};

} // namespace Kernel::Memory
@@ -1,163 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include <atomic>
#include <new>

#include "common/assert.h"
#include "common/common_types.h"

namespace Kernel::Memory {

namespace impl {

class SlabHeapImpl final : NonCopyable {
public:
    struct Node {
        Node* next{};
    };

    constexpr SlabHeapImpl() = default;

    void Initialize(std::size_t size) {
        ASSERT(head == nullptr);
        obj_size = size;
    }

    constexpr std::size_t GetObjectSize() const {
        return obj_size;
    }

    Node* GetHead() const {
        return head;
    }

    void* Allocate() {
        Node* ret = head.load();

        do {
            if (ret == nullptr) {
                break;
            }
        } while (!head.compare_exchange_weak(ret, ret->next));

        return ret;
    }

    void Free(void* obj) {
        Node* node = static_cast<Node*>(obj);

        Node* cur_head = head.load();
        do {
            node->next = cur_head;
        } while (!head.compare_exchange_weak(cur_head, node));
    }

private:
    std::atomic<Node*> head{};
    std::size_t obj_size{};
};

} // namespace impl

class SlabHeapBase : NonCopyable {
public:
    constexpr SlabHeapBase() = default;

    constexpr bool Contains(uintptr_t addr) const {
        return start <= addr && addr < end;
    }

    constexpr std::size_t GetSlabHeapSize() const {
        return (end - start) / GetObjectSize();
    }

    constexpr std::size_t GetObjectSize() const {
        return impl.GetObjectSize();
    }

    constexpr uintptr_t GetSlabHeapAddress() const {
        return start;
    }

    std::size_t GetObjectIndexImpl(const void* obj) const {
        return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
    }

    std::size_t GetPeakIndex() const {
        return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
    }

    void* AllocateImpl() {
        return impl.Allocate();
    }

    void FreeImpl(void* obj) {
        // Don't allow freeing an object that wasn't allocated from this heap
        ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
        impl.Free(obj);
    }

    void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
        // Ensure we don't initialize a slab using null memory
        ASSERT(memory != nullptr);

        // Initialize the base allocator
        impl.Initialize(obj_size);

        // Set our tracking variables
        const std::size_t num_obj = (memory_size / obj_size);
        start = reinterpret_cast<uintptr_t>(memory);
        end = start + num_obj * obj_size;
        peak = start;

        // Free the objects
        u8* cur = reinterpret_cast<u8*>(end);

        for (std::size_t i{}; i < num_obj; i++) {
            cur -= obj_size;
            impl.Free(cur);
        }
    }

private:
    using Impl = impl::SlabHeapImpl;

    Impl impl;
    uintptr_t peak{};
    uintptr_t start{};
    uintptr_t end{};
};

template <typename T>
class SlabHeap final : public SlabHeapBase {
public:
    constexpr SlabHeap() : SlabHeapBase() {}

    void Initialize(void* memory, std::size_t memory_size) {
        InitializeImpl(sizeof(T), memory, memory_size);
    }

    T* Allocate() {
        T* obj = static_cast<T*>(AllocateImpl());
        if (obj != nullptr) {
            new (obj) T();
        }
        return obj;
    }

    void Free(T* obj) {
        FreeImpl(obj);
    }

    constexpr std::size_t GetObjectIndex(const T* obj) const {
        return GetObjectIndexImpl(obj);
    }
};

} // namespace Kernel::Memory
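SlabHeapImpl above is a lock-free LIFO free list: Allocate() pops the head with a compare-exchange loop and Free() pushes with the same loop. A minimal standalone sketch of the two loops (assumed free-standing types; note that, like the original, this simple pop is exposed to the classic ABA hazard if nodes are recycled concurrently):

    #include <atomic>

    struct Node {
        Node* next{};
    };

    inline Node* Pop(std::atomic<Node*>& head) {
        Node* ret = head.load();
        // compare_exchange_weak reloads ret on failure, so the loop retries.
        while (ret != nullptr && !head.compare_exchange_weak(ret, ret->next)) {
        }
        return ret;
    }

    inline void Push(std::atomic<Node*>& head, Node* node) {
        Node* cur = head.load();
        do {
            node->next = cur;
        } while (!head.compare_exchange_weak(cur, node));
    }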
@@ -1,40 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <limits>
#include <random>

#include "core/hle/kernel/memory/system_control.h"

namespace Kernel::Memory::SystemControl {
namespace {
template <typename F>
u64 GenerateUniformRange(u64 min, u64 max, F f) {
    // Handle the case where the difference is too large to represent.
    if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
        return f();
    }

    // Iterate until we get a value in range.
    const u64 range_size = ((max + 1) - min);
    const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
    while (true) {
        if (const u64 rnd = f(); rnd < effective_max) {
            return min + (rnd % range_size);
        }
    }
}

u64 GenerateRandomU64ForInit() {
    static std::random_device device;
    static std::mt19937 gen(device());
    static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
    return distribution(gen);
}
} // Anonymous namespace

u64 GenerateRandomRange(u64 min, u64 max) {
    return GenerateUniformRange(min, max, GenerateRandomU64ForInit);
}

} // namespace Kernel::Memory::SystemControl
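GenerateUniformRange above rejects raw draws at or above effective_max so that the final modulo is taken over a whole number of range_size-sized buckets; without that step, rnd % range_size would slightly favor small results. A standalone sketch of the same rejection-sampling idea (names and the PRNG choice are illustrative):

#include <cstdint>
#include <limits>
#include <random>

// Uniform draw in [min, max] without modulo bias, mirroring the loop above.
uint64_t UniformInRange(uint64_t min, uint64_t max, std::mt19937_64& gen) {
    const uint64_t range_size = (max - min) + 1; // full-u64 range assumed handled by the caller
    const uint64_t effective_max =
        (std::numeric_limits<uint64_t>::max() / range_size) * range_size;

    uint64_t rnd;
    do {
        rnd = gen(); // raw 64-bit draw
    } while (rnd >= effective_max); // reject draws from the biased tail

    return min + (rnd % range_size);
}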
@@ -1,13 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"

namespace Kernel::Memory::SystemControl {

u64 GenerateRandomRange(u64 min, u64 max);

} // namespace Kernel::Memory::SystemControl
@@ -1,170 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <memory>
#include <utility>
#include <vector>

#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
#include "core/memory.h"

namespace Kernel {

/// Returns the highest priority thread waiting on the given mutex, along with
/// the total number of threads waiting on it.
static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
    const std::shared_ptr<Thread>& current_thread, VAddr mutex_addr) {

    std::shared_ptr<Thread> highest_priority_thread;
    u32 num_waiters = 0;

    for (const auto& thread : current_thread->GetMutexWaitingThreads()) {
        if (thread->GetMutexWaitAddress() != mutex_addr)
            continue;

        ++num_waiters;
        if (highest_priority_thread == nullptr ||
            thread->GetPriority() < highest_priority_thread->GetPriority()) {
            highest_priority_thread = thread;
        }
    }

    return {highest_priority_thread, num_waiters};
}

/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread,
                                   std::shared_ptr<Thread> new_owner) {
    current_thread->RemoveMutexWaiter(new_owner);
    const auto threads = current_thread->GetMutexWaitingThreads();
    for (const auto& thread : threads) {
        if (thread->GetMutexWaitAddress() != mutex_addr)
            continue;

        ASSERT(thread->GetLockOwner() == current_thread.get());
        current_thread->RemoveMutexWaiter(thread);
        if (new_owner != thread)
            new_owner->AddMutexWaiter(thread);
    }
}

Mutex::Mutex(Core::System& system) : system{system} {}
Mutex::~Mutex() = default;

ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
                             Handle requesting_thread_handle) {
    // The mutex address must be 4-byte aligned
    if ((address % sizeof(u32)) != 0) {
        LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
        return ERR_INVALID_ADDRESS;
    }

    auto& kernel = system.Kernel();
    std::shared_ptr<Thread> current_thread =
        SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
    {
        KScopedSchedulerLock lock(kernel);
        // The mutex address must be 4-byte aligned
        if ((address % sizeof(u32)) != 0) {
            return ERR_INVALID_ADDRESS;
        }

        const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
        std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
        std::shared_ptr<Thread> requesting_thread =
            handle_table.Get<Thread>(requesting_thread_handle);

        // TODO(Subv): It is currently unknown if it is possible to lock a mutex on behalf of
        // another thread.
        ASSERT(requesting_thread == current_thread);

        current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);

        const u32 addr_value = system.Memory().Read32(address);

        // If the mutex isn't being held, just return success.
        if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
            return RESULT_SUCCESS;
        }

        if (holding_thread == nullptr) {
            return ERR_INVALID_HANDLE;
        }

        // Wait until the mutex is released
        current_thread->SetMutexWaitAddress(address);
        current_thread->SetWaitHandle(requesting_thread_handle);

        current_thread->SetStatus(ThreadStatus::WaitMutex);

        // Update the lock holder thread's priority to prevent priority inversion.
        holding_thread->AddMutexWaiter(current_thread);
    }

    {
        KScopedSchedulerLock lock(kernel);
        auto* owner = current_thread->GetLockOwner();
        if (owner != nullptr) {
            owner->RemoveMutexWaiter(current_thread);
        }
    }
    return current_thread->GetSignalingResult();
}

std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thread> owner,
                                                             VAddr address) {
    // The mutex address must be 4-byte aligned
    if ((address % sizeof(u32)) != 0) {
        LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
        return {ERR_INVALID_ADDRESS, nullptr};
    }

    auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address);
    if (new_owner == nullptr) {
        system.Memory().Write32(address, 0);
        return {RESULT_SUCCESS, nullptr};
    }
    // Transfer the ownership of the mutex from the previous owner to the new one.
    TransferMutexOwnership(address, owner, new_owner);
    u32 mutex_value = new_owner->GetWaitHandle();
    if (num_waiters >= 2) {
        // Notify the guest that there are still some threads waiting for the mutex
        mutex_value |= Mutex::MutexHasWaitersFlag;
    }
    new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
    new_owner->SetLockOwner(nullptr);
    new_owner->ResumeFromWait();

    system.Memory().Write32(address, mutex_value);
    return {RESULT_SUCCESS, new_owner};
}

ResultCode Mutex::Release(VAddr address) {
    auto& kernel = system.Kernel();
    KScopedSchedulerLock lock(kernel);

    std::shared_ptr<Thread> current_thread =
        SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());

    auto [result, new_owner] = Unlock(current_thread, address);

    if (result != RESULT_SUCCESS && new_owner != nullptr) {
        new_owner->SetSynchronizationResults(nullptr, result);
    }

    return result;
}

} // namespace Kernel
@@ -1,42 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <utility>

#include "common/common_types.h"
#include "core/hle/kernel/object.h"

union ResultCode;

namespace Core {
class System;
}

namespace Kernel {

class Thread;

class Mutex final {
public:
    explicit Mutex(Core::System& system);
    ~Mutex();

    /// Flag that indicates that a mutex still has threads waiting for it.
    static constexpr u32 MutexHasWaitersFlag = 0x40000000;
    /// Mask of the bits in a mutex address value that contain the mutex owner.
    static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;

    /// Attempts to acquire a mutex at the specified address.
    ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
                          Handle requesting_thread_handle);

    /// Unlocks a mutex for owner at address
    std::pair<ResultCode, std::shared_ptr<Thread>> Unlock(std::shared_ptr<Thread> owner,
                                                          VAddr address);

    /// Releases the mutex at the specified address.
    ResultCode Release(VAddr address);

private:
    Core::System& system;
};

} // namespace Kernel
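The guest-visible mutex word above packs the owner's thread handle with bit 30 as the waiters flag, and MutexOwnerMask (the complement of MutexHasWaitersFlag) strips that flag back off. A standalone sketch of the encoding; the handle value is made up:

#include <cstdint>

constexpr uint32_t HasWaitersFlag = 0x40000000; // mirrors Mutex::MutexHasWaitersFlag
constexpr uint32_t OwnerMask = 0xBFFFFFFF;      // mirrors Mutex::MutexOwnerMask

int main() {
    const uint32_t owner_handle = 0x0000ABCD; // hypothetical thread handle
    const uint32_t mutex_word = owner_handle | HasWaitersFlag; // held, with waiters

    const bool has_waiters = (mutex_word & HasWaitersFlag) != 0;
    const uint32_t owner = mutex_word & OwnerMask; // recovers 0x0000ABCD

    return (has_waiters && owner == owner_handle) ? 0 : 1;
}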
@@ -1,42 +0,0 @@
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"

namespace Kernel {

Object::Object(KernelCore& kernel_)
    : kernel{kernel_}, object_id{kernel_.CreateNewObjectID()}, name{"[UNKNOWN KERNEL OBJECT]"} {}
Object::Object(KernelCore& kernel_, std::string&& name_)
    : kernel{kernel_}, object_id{kernel_.CreateNewObjectID()}, name{std::move(name_)} {}
Object::~Object() = default;

bool Object::IsWaitable() const {
    switch (GetHandleType()) {
    case HandleType::ReadableEvent:
    case HandleType::Thread:
    case HandleType::Process:
    case HandleType::ServerPort:
    case HandleType::ServerSession:
        return true;

    case HandleType::Unknown:
    case HandleType::Event:
    case HandleType::WritableEvent:
    case HandleType::SharedMemory:
    case HandleType::TransferMemory:
    case HandleType::ResourceLimit:
    case HandleType::ClientPort:
    case HandleType::ClientSession:
    case HandleType::Session:
        return false;
    }

    UNREACHABLE();
    return false;
}

} // namespace Kernel
@@ -1,96 +0,0 @@
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <atomic>
#include <memory>
#include <string>

#include "common/common_types.h"

namespace Kernel {

class KernelCore;

using Handle = u32;

enum class HandleType : u32 {
    Unknown,
    Event,
    WritableEvent,
    ReadableEvent,
    SharedMemory,
    TransferMemory,
    Thread,
    Process,
    ResourceLimit,
    ClientPort,
    ServerPort,
    ClientSession,
    ServerSession,
    Session,
};

class Object : NonCopyable, public std::enable_shared_from_this<Object> {
public:
    explicit Object(KernelCore& kernel_);
    explicit Object(KernelCore& kernel_, std::string&& name_);
    virtual ~Object();

    /// Returns a unique identifier for the object. For debugging purposes only.
    u32 GetObjectId() const {
        return object_id.load(std::memory_order_relaxed);
    }

    virtual std::string GetTypeName() const {
        return "[BAD KERNEL OBJECT TYPE]";
    }
    virtual std::string GetName() const {
        return name;
    }
    virtual HandleType GetHandleType() const = 0;

    void Close() {
        // TODO(bunnei): This is a placeholder to decrement the reference count, which we will use
        // when we implement KAutoObject instead of using shared_ptr.
    }

    /**
     * Check if a thread can wait on the object
     * @return True if a thread can wait on the object, otherwise false
     */
    bool IsWaitable() const;

    virtual void Finalize() = 0;

protected:
    /// The kernel instance this object was created under.
    KernelCore& kernel;

private:
    std::atomic<u32> object_id{0};
    std::string name;
};

template <typename T>
std::shared_ptr<T> SharedFrom(T* raw) {
    if (raw == nullptr)
        return nullptr;
    return std::static_pointer_cast<T>(raw->shared_from_this());
}

/**
 * Attempts to downcast the given Object pointer to a pointer to T.
 * @return Derived pointer to the object, or `nullptr` if `object` isn't of type T.
 */
template <typename T>
inline std::shared_ptr<T> DynamicObjectCast(std::shared_ptr<Object> object) {
    if (object != nullptr && object->GetHandleType() == T::HANDLE_TYPE) {
        return std::static_pointer_cast<T>(object);
    }
    return nullptr;
}

} // namespace Kernel
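DynamicObjectCast above substitutes a HANDLE_TYPE comparison for RTTI: the cast succeeds only when the object's runtime type tag matches the target's static tag. A self-contained sketch of the same pattern with stand-in types (the real Object hierarchy lives in the kernel):

#include <memory>

enum class HandleType { Unknown, Process };

struct ObjectBase {
    virtual ~ObjectBase() = default;
    virtual HandleType GetHandleType() const = 0;
};

struct FakeProcess : ObjectBase {
    static constexpr HandleType HANDLE_TYPE = HandleType::Process;
    HandleType GetHandleType() const override { return HANDLE_TYPE; }
};

template <typename T>
std::shared_ptr<T> Downcast(std::shared_ptr<ObjectBase> object) {
    // Same shape as DynamicObjectCast: tag check, then static_pointer_cast.
    if (object != nullptr && object->GetHandleType() == T::HANDLE_TYPE) {
        return std::static_pointer_cast<T>(object);
    }
    return nullptr;
}

int main() {
    std::shared_ptr<ObjectBase> obj = std::make_shared<FakeProcess>();
    return Downcast<FakeProcess>(obj) != nullptr ? 0 : 1; // cast succeeds
}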
@@ -1,483 +0,0 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <bitset>
#include <cstring>
#include <ctime>
#include <memory>
#include <optional>
#include <random>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/settings.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/lock.h"
#include "core/memory.h"

namespace Kernel {
namespace {
/**
 * Sets up the primary application thread
 *
 * @param system The system instance to create the main thread under.
 * @param owner_process The parent process for the main thread
 * @param priority The priority to give the main thread
 */
void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) {
    const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
    ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1));
    auto thread_res =
        KThread::CreateUserThread(system, ThreadType::User, "main", entry_point, priority, 0,
                                  owner_process.GetIdealCoreId(), stack_top, &owner_process);

    std::shared_ptr<KThread> thread = std::move(thread_res).Unwrap();

    // Register 1 must be a handle to the main thread
    const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap();
    thread->GetContext32().cpu_registers[0] = 0;
    thread->GetContext64().cpu_registers[0] = 0;
    thread->GetContext32().cpu_registers[1] = thread_handle;
    thread->GetContext64().cpu_registers[1] = thread_handle;

    auto& kernel = system.Kernel();
    // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
    {
        KScopedSchedulerLock lock{kernel};
        thread->SetState(ThreadState::Runnable);
    }
}
} // Anonymous namespace

// Represents a page used for thread-local storage.
//
// Each TLS page contains slots that may be used by processes and threads.
// Every process and thread is created with a slot in some arbitrary page
// (whichever page happens to have an available slot).
class TLSPage {
public:
    static constexpr std::size_t num_slot_entries =
        Core::Memory::PAGE_SIZE / Core::Memory::TLS_ENTRY_SIZE;

    explicit TLSPage(VAddr address) : base_address{address} {}

    bool HasAvailableSlots() const {
        return !is_slot_used.all();
    }

    VAddr GetBaseAddress() const {
        return base_address;
    }

    std::optional<VAddr> ReserveSlot() {
        for (std::size_t i = 0; i < is_slot_used.size(); i++) {
            if (is_slot_used[i]) {
                continue;
            }

            is_slot_used[i] = true;
            return base_address + (i * Core::Memory::TLS_ENTRY_SIZE);
        }

        return std::nullopt;
    }

    void ReleaseSlot(VAddr address) {
        // Ensure that all given addresses are consistent with how TLS pages
        // are intended to be used when releasing slots.
        ASSERT(IsWithinPage(address));
        ASSERT((address % Core::Memory::TLS_ENTRY_SIZE) == 0);

        const std::size_t index = (address - base_address) / Core::Memory::TLS_ENTRY_SIZE;
        is_slot_used[index] = false;
    }

private:
    bool IsWithinPage(VAddr address) const {
        return base_address <= address && address < base_address + Core::Memory::PAGE_SIZE;
    }

    VAddr base_address;
    std::bitset<num_slot_entries> is_slot_used;
};

std::shared_ptr<Process> Process::Create(Core::System& system, std::string name, ProcessType type) {
    auto& kernel = system.Kernel();

    std::shared_ptr<Process> process = std::make_shared<Process>(system);
    process->name = std::move(name);

    process->resource_limit = kernel.GetSystemResourceLimit();
    process->status = ProcessStatus::Created;
    process->program_id = 0;
    process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
                                                              : kernel.CreateNewUserProcessID();
    process->capabilities.InitializeForMetadatalessProcess();

    std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr)));
    std::uniform_int_distribution<u64> distribution;
    std::generate(process->random_entropy.begin(), process->random_entropy.end(),
                  [&] { return distribution(rng); });

    kernel.AppendNewProcess(process);
    return process;
}

std::shared_ptr<KResourceLimit> Process::GetResourceLimit() const {
    return resource_limit;
}

void Process::IncrementThreadCount() {
    ASSERT(num_threads >= 0);
    num_created_threads++;

    if (const auto count = ++num_threads; count > peak_num_threads) {
        peak_num_threads = count;
    }
}

void Process::DecrementThreadCount() {
    ASSERT(num_threads > 0);

    if (const auto count = --num_threads; count == 0) {
        UNIMPLEMENTED_MSG("Process termination is not implemented!");
    }
}

u64 Process::GetTotalPhysicalMemoryAvailable() const {
    const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
                       page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
                       main_thread_stack_size};
    ASSERT(capacity == kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application));
    if (capacity < memory_usage_capacity) {
        return capacity;
    }
    return memory_usage_capacity;
}

u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
    return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
}

u64 Process::GetTotalPhysicalMemoryUsed() const {
    return image_size + main_thread_stack_size + page_table->GetTotalHeapSize() +
           GetSystemResourceSize();
}

u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
    return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
}

bool Process::ReleaseUserException(KThread* thread) {
    KScopedSchedulerLock sl{kernel};

    if (exception_thread == thread) {
        exception_thread = nullptr;

        // Remove waiter thread.
        s32 num_waiters{};
        KThread* next = thread->RemoveWaiterByKey(
            std::addressof(num_waiters),
            reinterpret_cast<uintptr_t>(std::addressof(exception_thread)));
        if (next != nullptr) {
            if (next->GetState() == ThreadState::Waiting) {
                next->SetState(ThreadState::Runnable);
            } else {
                KScheduler::SetSchedulerUpdateNeeded(kernel);
            }
        }

        return true;
    } else {
        return false;
    }
}

void Process::PinCurrentThread() {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Get the current thread.
    const s32 core_id = GetCurrentCoreId(kernel);
    KThread* cur_thread = GetCurrentThreadPointer(kernel);

    // Pin it.
    PinThread(core_id, cur_thread);
    cur_thread->Pin();

    // An update is needed.
    KScheduler::SetSchedulerUpdateNeeded(kernel);
}

void Process::UnpinCurrentThread() {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Get the current thread.
    const s32 core_id = GetCurrentCoreId(kernel);
    KThread* cur_thread = GetCurrentThreadPointer(kernel);

    // Unpin it.
    cur_thread->Unpin();
    UnpinThread(core_id, cur_thread);

    // An update is needed.
    KScheduler::SetSchedulerUpdateNeeded(kernel);
}

void Process::RegisterThread(const KThread* thread) {
    thread_list.push_back(thread);
}

void Process::UnregisterThread(const KThread* thread) {
    thread_list.remove(thread);
}

ResultCode Process::Reset() {
    // Lock the process and the scheduler.
    KScopedLightLock lk(state_lock);
    KScopedSchedulerLock sl{kernel};

    // Validate that we're in a state that we can reset.
    R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
    R_UNLESS(is_signaled, ResultInvalidState);

    // Clear signaled.
    is_signaled = false;
    return RESULT_SUCCESS;
}

ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
                                     std::size_t code_size) {
    program_id = metadata.GetTitleID();
    ideal_core = metadata.GetMainThreadCore();
    is_64bit_process = metadata.Is64BitProgram();
    system_resource_size = metadata.GetSystemResourceSize();
    image_size = code_size;

    KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
                                                  code_size + system_resource_size);
    if (!memory_reservation.Succeeded()) {
        LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
                  code_size + system_resource_size);
        return ResultResourceLimitedExceeded;
    }
    // Initialize process address space
    if (const ResultCode result{
            page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
                                             code_size, KMemoryManager::Pool::Application)};
        result.IsError()) {
        return result;
    }

    // Map process code region
    if (const ResultCode result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
                                                           code_size / PageSize, KMemoryState::Code,
                                                           KMemoryPermission::None)};
        result.IsError()) {
        return result;
    }

    // Initialize process capabilities
    const auto& caps{metadata.GetKernelCapabilities()};
    if (const ResultCode result{
            capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
        result.IsError()) {
        return result;
    }

    // Set memory usage capacity
    switch (metadata.GetAddressSpaceType()) {
    case FileSys::ProgramAddressSpaceType::Is32Bit:
    case FileSys::ProgramAddressSpaceType::Is36Bit:
    case FileSys::ProgramAddressSpaceType::Is39Bit:
        memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart();
        break;

    case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
        memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() +
                                page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart();
        break;

    default:
        UNREACHABLE();
    }

    // Create TLS region
    tls_region_address = CreateTLSRegion();
    memory_reservation.Commit();

    return handle_table.SetSize(capabilities.GetHandleTableSize());
}

void Process::Run(s32 main_thread_priority, u64 stack_size) {
    AllocateMainThreadStack(stack_size);
    resource_limit->Reserve(LimitableResource::Threads, 1);
    resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);

    const std::size_t heap_capacity{memory_usage_capacity - main_thread_stack_size - image_size};
    ASSERT(!page_table->SetHeapCapacity(heap_capacity).IsError());

    ChangeStatus(ProcessStatus::Running);

    SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top);
}

void Process::PrepareForTermination() {
    ChangeStatus(ProcessStatus::Exiting);

    const auto stop_threads = [this](const std::vector<std::shared_ptr<KThread>>& thread_list) {
        for (auto& thread : thread_list) {
            if (thread->GetOwnerProcess() != this)
                continue;

            if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread())
                continue;

            // TODO(Subv): When are the other running/ready threads terminated?
            ASSERT_MSG(thread->GetState() == ThreadState::Waiting,
                       "Exiting processes with non-waiting threads is currently unimplemented");

            thread->Exit();
        }
    };

    stop_threads(system.GlobalSchedulerContext().GetThreadList());

    FreeTLSRegion(tls_region_address);
    tls_region_address = 0;

    if (resource_limit) {
        resource_limit->Release(LimitableResource::PhysicalMemory,
                                main_thread_stack_size + image_size);
    }

    ChangeStatus(ProcessStatus::Exited);
}

/**
 * Attempts to find a TLS page that contains a free slot for
 * use by a thread.
 *
 * @returns If a page with an available slot is found, then an iterator
 *          pointing to the page is returned. Otherwise the end iterator
 *          is returned instead.
 */
static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
    return std::find_if(tls_pages.begin(), tls_pages.end(),
                        [](const auto& page) { return page.HasAvailableSlots(); });
}

VAddr Process::CreateTLSRegion() {
    KScopedSchedulerLock lock(system.Kernel());
    if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
        tls_page_iter != tls_pages.cend()) {
        return *tls_page_iter->ReserveSlot();
    }

    Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()};
    ASSERT(tls_page_ptr);

    const VAddr start{page_table->GetKernelMapRegionStart()};
    const VAddr size{page_table->GetKernelMapRegionEnd() - start};
    const PAddr tls_map_addr{system.DeviceMemory().GetPhysicalAddr(tls_page_ptr)};
    const VAddr tls_page_addr{page_table
                                  ->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize,
                                                         KMemoryState::ThreadLocal,
                                                         KMemoryPermission::ReadAndWrite,
                                                         tls_map_addr)
                                  .ValueOr(0)};

    ASSERT(tls_page_addr);

    std::memset(tls_page_ptr, 0, PageSize);
    tls_pages.emplace_back(tls_page_addr);

    const auto reserve_result{tls_pages.back().ReserveSlot()};
    ASSERT(reserve_result.has_value());

    return *reserve_result;
}

void Process::FreeTLSRegion(VAddr tls_address) {
    KScopedSchedulerLock lock(system.Kernel());
    const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
    auto iter =
        std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
            return page.GetBaseAddress() == aligned_address;
        });

    // Something has gone very wrong if we're freeing a region
    // with no actual page available.
    ASSERT(iter != tls_pages.cend());

    iter->ReleaseSlot(tls_address);
}

void Process::LoadModule(CodeSet code_set, VAddr base_addr) {
    std::lock_guard lock{HLE::g_hle_lock};
    const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
                                      KMemoryPermission permission) {
        page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission);
    };

    system.Memory().WriteBlock(*this, base_addr, code_set.memory.data(), code_set.memory.size());

    ReprotectSegment(code_set.CodeSegment(), KMemoryPermission::ReadAndExecute);
    ReprotectSegment(code_set.RODataSegment(), KMemoryPermission::Read);
    ReprotectSegment(code_set.DataSegment(), KMemoryPermission::ReadAndWrite);
}

bool Process::IsSignaled() const {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
    return is_signaled;
}

Process::Process(Core::System& system)
    : KSynchronizationObject{system.Kernel()}, page_table{std::make_unique<KPageTable>(system)},
      handle_table{system.Kernel()}, address_arbiter{system}, condition_var{system},
      state_lock{system.Kernel()}, system{system} {}

Process::~Process() = default;

void Process::ChangeStatus(ProcessStatus new_status) {
    if (status == new_status) {
        return;
    }

    status = new_status;
    is_signaled = true;
    NotifyAvailable();
}

ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) {
    ASSERT(stack_size);

    // The kernel always ensures that the given stack size is page aligned.
    main_thread_stack_size = Common::AlignUp(stack_size, PageSize);

    const VAddr start{page_table->GetStackRegionStart()};
    const std::size_t size{page_table->GetStackRegionEnd() - start};

    CASCADE_RESULT(main_thread_stack_top,
                   page_table->AllocateAndMapMemory(
                       main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
                       KMemoryState::Stack, KMemoryPermission::ReadAndWrite));

    main_thread_stack_top += main_thread_stack_size;

    return RESULT_SUCCESS;
}

} // namespace Kernel
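The TLS bookkeeping above relies on 0x1000-byte pages divided into 0x200-byte entries, so every page holds exactly eight slots and slot lookup is plain offset arithmetic. A standalone sketch of that arithmetic (the page base address is made up):

#include <cstdint>

constexpr uint64_t PageSize = 0x1000;    // mirrors Core::Memory::PAGE_SIZE
constexpr uint64_t TlsEntrySize = 0x200; // mirrors Core::Memory::TLS_ENTRY_SIZE

int main() {
    static_assert(PageSize / TlsEntrySize == 8, "eight TLS slots per page");

    const uint64_t page_base = 0x0000000010000000; // hypothetical TLS page address
    const uint64_t slot_addr = page_base + 3 * TlsEntrySize;

    // Mirrors TLSPage::ReleaseSlot's index computation.
    const uint64_t index = (slot_addr - page_base) / TlsEntrySize;
    return index == 3 ? 0 : 1;
}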
@@ -1,482 +0,0 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <cstddef>
#include <list>
#include <string>
#include <unordered_map>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/result.h"

namespace Core {
class System;
}

namespace FileSys {
class ProgramMetadata;
}

namespace Kernel {

class KernelCore;
class KPageTable;
class KResourceLimit;
class KThread;
class TLSPage;

struct CodeSet;

enum class MemoryRegion : u16 {
    APPLICATION = 1,
    SYSTEM = 2,
    BASE = 3,
};

/**
 * Indicates the status of a Process instance.
 *
 * @note These match the values as used by kernel,
 *       so new entries should only be added if RE
 *       shows that a new value has been introduced.
 */
enum class ProcessStatus {
    Created,
    CreatedWithDebuggerAttached,
    Running,
    WaitingForDebuggerToAttach,
    DebuggerAttached,
    Exiting,
    Exited,
    DebugBreak,
};

class Process final : public KSynchronizationObject {
public:
    explicit Process(Core::System& system);
    ~Process() override;

    enum : u64 {
        /// Lowest allowed process ID for a kernel initial process.
        InitialKIPIDMin = 1,
        /// Highest allowed process ID for a kernel initial process.
        InitialKIPIDMax = 80,

        /// Lowest allowed process ID for a userland process.
        ProcessIDMin = 81,
        /// Highest allowed process ID for a userland process.
        ProcessIDMax = 0xFFFFFFFFFFFFFFFF,
    };

    // Used to determine how process IDs are assigned.
    enum class ProcessType {
        KernelInternal,
        Userland,
    };

    static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;

    static std::shared_ptr<Process> Create(Core::System& system, std::string name,
                                           ProcessType type);

    std::string GetTypeName() const override {
        return "Process";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::Process;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /// Gets a reference to the process' page table.
    KPageTable& PageTable() {
        return *page_table;
    }

    /// Gets a const reference to the process' page table.
    const KPageTable& PageTable() const {
        return *page_table;
    }

    /// Gets a reference to the process' handle table.
    HandleTable& GetHandleTable() {
        return handle_table;
    }

    /// Gets a const reference to the process' handle table.
    const HandleTable& GetHandleTable() const {
        return handle_table;
    }

    ResultCode SignalToAddress(VAddr address) {
        return condition_var.SignalToAddress(address);
    }

    ResultCode WaitForAddress(Handle handle, VAddr address, u32 tag) {
        return condition_var.WaitForAddress(handle, address, tag);
    }

    void SignalConditionVariable(u64 cv_key, int32_t count) {
        return condition_var.Signal(cv_key, count);
    }

    ResultCode WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
        return condition_var.Wait(address, cv_key, tag, ns);
    }

    ResultCode SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value,
                                    s32 count) {
        return address_arbiter.SignalToAddress(address, signal_type, value, count);
    }

    ResultCode WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
                                  s64 timeout) {
        return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
    }

    /// Gets the address to the process' dedicated TLS region.
    VAddr GetTLSRegionAddress() const {
        return tls_region_address;
    }

    /// Gets the current status of the process
    ProcessStatus GetStatus() const {
        return status;
    }

    /// Gets the unique ID that identifies this particular process.
    u64 GetProcessID() const {
        return process_id;
    }

    /// Gets the title ID corresponding to this process.
    u64 GetTitleID() const {
        return program_id;
    }

    /// Gets the resource limit descriptor for this process
    std::shared_ptr<KResourceLimit> GetResourceLimit() const;

    /// Gets the ideal CPU core ID for this process
    u8 GetIdealCoreId() const {
        return ideal_core;
    }

    /// Checks if the specified thread priority is valid.
    bool CheckThreadPriority(s32 prio) const {
        return ((1ULL << prio) & GetPriorityMask()) != 0;
    }

    /// Gets the bitmask of allowed cores that this process' threads can run on.
    u64 GetCoreMask() const {
        return capabilities.GetCoreMask();
    }

    /// Gets the bitmask of allowed thread priorities.
    u64 GetPriorityMask() const {
        return capabilities.GetPriorityMask();
    }

    /// Gets the amount of secure memory to allocate for memory management.
    u32 GetSystemResourceSize() const {
        return system_resource_size;
    }

    /// Gets the amount of secure memory currently in use for memory management.
    u32 GetSystemResourceUsage() const {
        // On hardware, this returns the amount of system resource memory that has
        // been used by the kernel. This is problematic for Yuzu to emulate, because
        // system resource memory is used for page tables -- and yuzu doesn't really
        // have a way to calculate how much memory is required for page tables for
        // the current process at any given time.
        // TODO: Is this even worth implementing? Games may retrieve this value via
        // an SDK function that gets used + available system resource size for debug
        // or diagnostic purposes. However, it seems unlikely that a game would make
        // decisions based on how much system memory is dedicated to its page tables.
        // Is returning a value other than zero wise?
        return 0;
    }

    /// Whether this process is an AArch64 or AArch32 process.
    bool Is64BitProcess() const {
        return is_64bit_process;
    }

    [[nodiscard]] bool IsSuspended() const {
        return is_suspended;
    }

    void SetSuspended(bool suspended) {
        is_suspended = suspended;
    }

    /// Gets the total running time of the process instance in ticks.
    u64 GetCPUTimeTicks() const {
        return total_process_running_time_ticks;
    }

    /// Updates the total running time, adding the given ticks to it.
    void UpdateCPUTimeTicks(u64 ticks) {
        total_process_running_time_ticks += ticks;
    }

    /// Gets the process schedule count, used for thread yielding
    s64 GetScheduledCount() const {
        return schedule_count;
    }

    /// Increments the process schedule count, used for thread yielding.
    void IncrementScheduledCount() {
        ++schedule_count;
    }

    void IncrementThreadCount();
    void DecrementThreadCount();

    void SetRunningThread(s32 core, KThread* thread, u64 idle_count) {
        running_threads[core] = thread;
        running_thread_idle_counts[core] = idle_count;
    }

    void ClearRunningThread(KThread* thread) {
        for (size_t i = 0; i < running_threads.size(); ++i) {
            if (running_threads[i] == thread) {
                running_threads[i] = nullptr;
            }
        }
    }

    [[nodiscard]] KThread* GetRunningThread(s32 core) const {
        return running_threads[core];
    }

    bool ReleaseUserException(KThread* thread);

    [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
        ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
        return pinned_threads[core_id];
    }

    /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
    u64 GetRandomEntropy(std::size_t index) const {
        return random_entropy.at(index);
    }

    /// Retrieves the total physical memory available to this process in bytes.
    u64 GetTotalPhysicalMemoryAvailable() const;

    /// Retrieves the total physical memory available to this process in bytes,
    /// without the size of the personal system resource heap added to it.
    u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const;

    /// Retrieves the total physical memory used by this process in bytes.
    u64 GetTotalPhysicalMemoryUsed() const;

    /// Retrieves the total physical memory used by this process in bytes,
    /// without the size of the personal system resource heap added to it.
    u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;

    /// Gets the list of all threads created with this process as their owner.
    const std::list<const KThread*>& GetThreadList() const {
        return thread_list;
    }

    /// Registers a thread as being created under this process,
    /// adding it to this process' thread list.
    void RegisterThread(const KThread* thread);

    /// Unregisters a thread from this process, removing it
    /// from this process' thread list.
    void UnregisterThread(const KThread* thread);

    /// Clears the signaled state of the process if and only if it's signaled.
    ///
    /// @pre The process must not be already terminated. If this is called on a
    ///      terminated process, then ERR_INVALID_STATE will be returned.
    ///
    /// @pre The process must be in a signaled state. If this is called on a
    ///      process instance that is not signaled, ERR_INVALID_STATE will be
    ///      returned.
    ResultCode Reset();

    /**
     * Loads process-specific configuration info with metadata provided
     * by an executable.
     *
     * @param metadata The provided metadata to load process specific info from.
     *
     * @returns RESULT_SUCCESS if all relevant metadata was able to be
     *          loaded and parsed. Otherwise, an error code is returned.
     */
    ResultCode LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size);

    /**
     * Starts the main application thread for this process.
     *
     * @param main_thread_priority The priority for the main thread.
     * @param stack_size The stack size for the main thread in bytes.
     */
    void Run(s32 main_thread_priority, u64 stack_size);

    /**
     * Prepares a process for termination by stopping all of its threads
     * and clearing any other resources.
     */
    void PrepareForTermination();

    void LoadModule(CodeSet code_set, VAddr base_addr);

    bool IsSignaled() const override;

    void Finalize() override {}

    void PinCurrentThread();
    void UnpinCurrentThread();

    KLightLock& GetStateLock() {
        return state_lock;
    }

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // Thread-local storage management

    // Marks the next available region as used and returns the address of the slot.
    [[nodiscard]] VAddr CreateTLSRegion();

    // Frees a used TLS slot identified by the given address
    void FreeTLSRegion(VAddr tls_address);

private:
    void PinThread(s32 core_id, KThread* thread) {
        ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
        ASSERT(thread != nullptr);
        ASSERT(pinned_threads[core_id] == nullptr);
        pinned_threads[core_id] = thread;
    }

    void UnpinThread(s32 core_id, KThread* thread) {
        ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
        ASSERT(thread != nullptr);
        ASSERT(pinned_threads[core_id] == thread);
        pinned_threads[core_id] = nullptr;
    }

    /// Changes the process status. If the status is different
    /// from the current process status, then this will trigger
    /// a process signal.
    void ChangeStatus(ProcessStatus new_status);

    /// Allocates the main thread stack for the process, given the stack size in bytes.
    ResultCode AllocateMainThreadStack(std::size_t stack_size);

    /// Memory manager for this process
    std::unique_ptr<KPageTable> page_table;

    /// Current status of the process
    ProcessStatus status{};

    /// The ID of this process
    u64 process_id = 0;

    /// Title ID corresponding to the process
    u64 program_id = 0;

    /// Specifies additional memory to be reserved for the process's memory management by the
    /// system. When this is non-zero, secure memory is allocated and used for page table allocation
    /// instead of using the normal global page tables/memory block management.
    u32 system_resource_size = 0;

    /// Resource limit descriptor for this process
    std::shared_ptr<KResourceLimit> resource_limit;

    /// The ideal CPU core for this process, threads are scheduled on this core by default.
    u8 ideal_core = 0;

    /// The Thread Local Storage area is allocated as processes create threads,
    /// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part
    /// holds the TLS for a specific thread. This vector contains which parts are in use for each
    /// page as a bitmask.
    /// This vector will grow as more pages are allocated for new threads.
    std::vector<TLSPage> tls_pages;

    /// Contains the parsed process capability descriptors.
    ProcessCapabilities capabilities;

    /// Whether or not this process is AArch64, or AArch32.
    /// By default, we currently assume this is true, unless otherwise
    /// specified by metadata provided to the process during loading.
    bool is_64bit_process = true;

    /// Total running time for the process in ticks.
    u64 total_process_running_time_ticks = 0;

    /// Per-process handle table for storing created object handles in.
    HandleTable handle_table;

    /// Per-process address arbiter.
    KAddressArbiter address_arbiter;

    /// The per-process mutex lock instance used for handling various
    /// forms of services, such as lock arbitration, and condition
    /// variable related facilities.
    KConditionVariable condition_var;

    /// Address indicating the location of the process' dedicated TLS region.
    VAddr tls_region_address = 0;

    /// Random values for svcGetInfo RandomEntropy
    std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};

    /// List of threads that are running with this process as their owner.
    std::list<const KThread*> thread_list;

    /// Address of the top of the main thread's stack
    VAddr main_thread_stack_top{};

    /// Size of the main thread's stack
    std::size_t main_thread_stack_size{};

    /// Memory usage capacity for the process
    std::size_t memory_usage_capacity{};

    /// Process total image size
    std::size_t image_size{};

    /// Name of this process
    std::string name;

    /// Schedule count of this process
    s64 schedule_count{};

    bool is_signaled{};
    bool is_suspended{};

    std::atomic<s32> num_created_threads{};
    std::atomic<u16> num_threads{};
    u16 peak_num_threads{};

    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{};
    std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{};
    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> pinned_threads{};

    KThread* exception_thread{};

    KLightLock state_lock;

    /// System context
    Core::System& system;
};

} // namespace Kernel
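CheckThreadPriority above treats the capability priority mask as a 64-entry bitset, one bit per priority level. A standalone sketch of the same test; the mask value is invented for illustration:

#include <cstdint>

// Mirrors Process::CheckThreadPriority: bit N set means priority N is allowed.
bool PriorityAllowed(int32_t prio, uint64_t priority_mask) {
    return ((1ULL << prio) & priority_mask) != 0;
}

int main() {
    const uint64_t mask = 0xFFFFFFFFFFFFFFF0ULL; // hypothetical: priorities 4..63 allowed
    return (!PriorityAllowed(2, mask) && PriorityAllowed(44, mask)) ? 0 : 1;
}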
@@ -1,52 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/readable_event.h"

namespace Kernel {

ReadableEvent::ReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {}
ReadableEvent::~ReadableEvent() = default;

void ReadableEvent::Signal() {
    if (is_signaled) {
        return;
    }

    is_signaled = true;
    NotifyAvailable();
}

bool ReadableEvent::IsSignaled() const {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    return is_signaled;
}

void ReadableEvent::Clear() {
    is_signaled = false;
}

ResultCode ReadableEvent::Reset() {
    KScopedSchedulerLock lock(kernel);
    if (!is_signaled) {
        LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
                  GetObjectId(), GetTypeName(), GetName());
        return ERR_INVALID_STATE;
    }

    Clear();

    return RESULT_SUCCESS;
}

} // namespace Kernel
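The event above is a latch: Signal sets the flag (and notifies waiters) only on the unsignaled-to-signaled edge, Clear drops it unconditionally, and Reset refuses to clear an event that was never signaled. A reduced sketch of that life cycle, with the error code collapsed to a bool:

struct MiniEvent {
    bool is_signaled = false;

    void Signal() { is_signaled = true; }
    void Clear() { is_signaled = false; }

    // Mirrors ReadableEvent::Reset: only legal while signaled.
    bool Reset() {
        if (!is_signaled) {
            return false; // the real code returns ERR_INVALID_STATE here
        }
        Clear();
        return true;
    }
};

int main() {
    MiniEvent ev;
    const bool premature = ev.Reset(); // fails: not yet signaled
    ev.Signal();
    return (!premature && ev.Reset()) ? 0 : 1;
}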
@@ -1,59 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"

union ResultCode;

namespace Kernel {

class KernelCore;
class WritableEvent;

class ReadableEvent final : public KSynchronizationObject {
    friend class WritableEvent;

public:
    ~ReadableEvent() override;

    std::string GetTypeName() const override {
        return "ReadableEvent";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ReadableEvent;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /// Unconditionally clears the readable event's state.
    void Clear();

    /// Clears the readable event's state if and only if it
    /// has already been signaled.
    ///
    /// @pre The event must be in a signaled state. If this event
    ///      is in an unsignaled state and this function is called,
    ///      then ERR_INVALID_STATE will be returned.
    ResultCode Reset();

    void Signal();

    bool IsSignaled() const override;

    void Finalize() override {}

private:
    explicit ReadableEvent(KernelCore& kernel);

    bool is_signaled{};
    std::string name; ///< Name of event (optional)
};

} // namespace Kernel
@@ -1,73 +0,0 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/result.h"

namespace Kernel {
namespace {
constexpr std::size_t ResourceTypeToIndex(ResourceType type) {
    return static_cast<std::size_t>(type);
}
} // Anonymous namespace

ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {}
ResourceLimit::~ResourceLimit() = default;

bool ResourceLimit::Reserve(ResourceType resource, s64 amount) {
    return Reserve(resource, amount, 10000000000);
}

bool ResourceLimit::Reserve(ResourceType resource, s64 amount, u64 timeout) {
    const std::size_t index{ResourceTypeToIndex(resource)};

    s64 new_value = current[index] + amount;
    if (new_value > limit[index] && available[index] + amount <= limit[index]) {
        // TODO(bunnei): This is wrong for multicore; we should make the calling thread wait for
        // the timeout here.
        new_value = current[index] + amount;
    }

    if (new_value <= limit[index]) {
        current[index] = new_value;
        return true;
    }
    return false;
}

void ResourceLimit::Release(ResourceType resource, u64 amount) {
    Release(resource, amount, amount);
}

void ResourceLimit::Release(ResourceType resource, u64 used_amount, u64 available_amount) {
    const std::size_t index{ResourceTypeToIndex(resource)};

    current[index] -= used_amount;
    available[index] -= available_amount;
}

std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
    return std::make_shared<ResourceLimit>(kernel);
}

s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const {
    return limit.at(ResourceTypeToIndex(resource)) - current.at(ResourceTypeToIndex(resource));
}

s64 ResourceLimit::GetMaxResourceValue(ResourceType resource) const {
    return limit.at(ResourceTypeToIndex(resource));
}

ResultCode ResourceLimit::SetLimitValue(ResourceType resource, s64 value) {
    const std::size_t index{ResourceTypeToIndex(resource)};
    if (current[index] <= value) {
        limit[index] = value;
        return RESULT_SUCCESS;
    } else {
        LOG_ERROR(Kernel, "Limit value is below current usage! resource={}, value={}, index={}",
                  resource, value, index);
        return ERR_INVALID_STATE;
    }
}
} // namespace Kernel
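Reserve above only commits current + amount when the result stays within limit, and Release walks the counter back down. A reduced single-resource sketch of that accounting (no timeout path, invented limit value):

#include <cstdint>

struct MiniLimit {
    int64_t limit;
    int64_t current = 0;

    bool Reserve(int64_t amount) {
        const int64_t new_value = current + amount;
        if (new_value <= limit) {
            current = new_value;
            return true;
        }
        return false; // over budget: reservation refused
    }

    void Release(int64_t amount) {
        current -= amount;
    }
};

int main() {
    MiniLimit threads{2};              // hypothetical limit of two threads
    const bool a = threads.Reserve(1); // granted
    const bool b = threads.Reserve(1); // granted
    const bool c = threads.Reserve(1); // refused: would exceed the limit
    threads.Release(1);
    return (a && b && !c && threads.Reserve(1)) ? 0 : 1;
}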
@@ -1,106 +0,0 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <memory>

#include "common/common_types.h"
#include "core/hle/kernel/object.h"

union ResultCode;

namespace Kernel {

class KernelCore;

enum class ResourceType : u32 {
    PhysicalMemory,
    Threads,
    Events,
    TransferMemory,
    Sessions,

    // Used as a count, not an actual type.
    ResourceTypeCount
};

constexpr bool IsValidResourceType(ResourceType type) {
    return type < ResourceType::ResourceTypeCount;
}

class ResourceLimit final : public Object {
public:
    explicit ResourceLimit(KernelCore& kernel);
    ~ResourceLimit() override;

    /// Creates a resource limit object.
    static std::shared_ptr<ResourceLimit> Create(KernelCore& kernel);

    std::string GetTypeName() const override {
        return "ResourceLimit";
    }
    std::string GetName() const override {
        return GetTypeName();
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    bool Reserve(ResourceType resource, s64 amount);
    bool Reserve(ResourceType resource, s64 amount, u64 timeout);
    void Release(ResourceType resource, u64 amount);
    void Release(ResourceType resource, u64 used_amount, u64 available_amount);

    /**
     * Gets the current value for the specified resource.
     * @param resource Requested resource type
     * @returns The current value of the resource type
     */
    s64 GetCurrentResourceValue(ResourceType resource) const;

    /**
     * Gets the max value for the specified resource.
     * @param resource Requested resource type
     * @returns The max value of the resource type
     */
    s64 GetMaxResourceValue(ResourceType resource) const;

    /**
     * Sets the limit value for a given resource type.
     *
     * @param resource The resource type to apply the limit to.
     * @param value The limit to apply to the given resource type.
     *
     * @return A result code indicating if setting the limit value
     *         was successful or not.
     *
     * @note The supplied limit value *must* be greater than or equal to
     *       the current resource value for the given resource type,
     *       otherwise ERR_INVALID_STATE will be returned.
     */
    ResultCode SetLimitValue(ResourceType resource, s64 value);

    void Finalize() override {}

private:
    // TODO(Subv): Increment resource limit current values in their respective Kernel::T::Create
    // functions
    //
    // Currently we have no way of distinguishing if a Create was called by the running application,
    // or by a service module. Approach this once we have separated the service modules into their
    // own processes

    using ResourceArray =
        std::array<s64, static_cast<std::size_t>(ResourceType::ResourceTypeCount)>;

    ResourceArray limit{};
    ResourceArray current{};
    ResourceArray available{};
};

} // namespace Kernel
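// The implementation file calls ResourceTypeToIndex(), which is not declared in this
// header. A minimal sketch of what such a helper presumably looks like (an assumption,
// not the original definition):
//
//     constexpr std::size_t ResourceTypeToIndex(ResourceType type) {
//         // ResourceType values are contiguous, so the enum value doubles as an
//         // index into the ResourceArray members.
//         return static_cast<std::size_t>(type);
//     }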
@@ -1,54 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <tuple>
#include "common/assert.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel {

ServerPort::ServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
ServerPort::~ServerPort() = default;

ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
    if (pending_sessions.empty()) {
        return ResultNotFound;
    }

    auto session = std::move(pending_sessions.back());
    pending_sessions.pop_back();
    return MakeResult(std::move(session));
}

void ServerPort::AppendPendingSession(std::shared_ptr<ServerSession> pending_session) {
    pending_sessions.push_back(std::move(pending_session));
    if (pending_sessions.size() == 1) {
        NotifyAvailable();
    }
}

bool ServerPort::IsSignaled() const {
    return !pending_sessions.empty();
}

ServerPort::PortPair ServerPort::CreatePortPair(KernelCore& kernel, u32 max_sessions,
                                                std::string name) {
    std::shared_ptr<ServerPort> server_port = std::make_shared<ServerPort>(kernel);
    std::shared_ptr<ClientPort> client_port = std::make_shared<ClientPort>(kernel);

    server_port->name = name + "_Server";
    client_port->name = name + "_Client";
    client_port->server_port = server_port;
    client_port->max_sessions = max_sessions;
    client_port->active_sessions = 0;

    return std::make_pair(std::move(server_port), std::move(client_port));
}

} // namespace Kernel
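// Illustrative usage sketch (not from the original file): a service would typically
// create the pair, keep the server end, and hand the client end out. The port name
// and session count are assumptions made for the example.
namespace {
void PortPairExample(Kernel::KernelCore& kernel) {
    auto [server_port, client_port] = Kernel::ServerPort::CreatePortPair(kernel, 64, "Example");
    // The server end becomes signaled once AppendPendingSession() queues a session,
    // at which point Accept() yields the pending ServerSession.
    if (auto session = server_port->Accept(); session.Succeeded()) {
        // ... service the new session ...
    }
}
} // namespace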
@@ -1,98 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"

namespace Kernel {

class ClientPort;
class KernelCore;
class ServerSession;
class SessionRequestHandler;

class ServerPort final : public KSynchronizationObject {
public:
    explicit ServerPort(KernelCore& kernel);
    ~ServerPort() override;

    using HLEHandler = std::shared_ptr<SessionRequestHandler>;
    using PortPair = std::pair<std::shared_ptr<ServerPort>, std::shared_ptr<ClientPort>>;

    /**
     * Creates a pair of ServerPort and an associated ClientPort.
     *
     * @param kernel The kernel instance to create the port pair under.
     * @param max_sessions Maximum number of sessions to the port
     * @param name Optional name of the ports
     * @return The created port pair
     */
    static PortPair CreatePortPair(KernelCore& kernel, u32 max_sessions,
                                   std::string name = "UnknownPort");

    std::string GetTypeName() const override {
        return "ServerPort";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ServerPort;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /**
     * Accepts a pending incoming connection on this port. If there are no pending sessions,
     * will return ResultNotFound.
     */
    ResultVal<std::shared_ptr<ServerSession>> Accept();

    /// Whether or not this server port has an HLE handler available.
    bool HasHLEHandler() const {
        return hle_handler != nullptr;
    }

    /// Gets the HLE handler for this port.
    HLEHandler GetHLEHandler() const {
        return hle_handler;
    }

    /**
     * Sets the HLE handler template for the port. ServerSessions created by connecting to
     * this port will inherit a reference to this handler.
     */
    void SetHleHandler(HLEHandler hle_handler_) {
        hle_handler = std::move(hle_handler_);
    }

    /// Appends a ServerSession to the collection of ServerSessions
    /// waiting to be accepted by this port.
    void AppendPendingSession(std::shared_ptr<ServerSession> pending_session);

    bool IsSignaled() const override;

    void Finalize() override {}

private:
    /// ServerSessions waiting to be accepted by the port
    std::vector<std::shared_ptr<ServerSession>> pending_sessions;

    /// This session's HLE request handler template (optional)
    /// ServerSessions created from this port inherit a reference to this handler.
    HLEHandler hle_handler;

    /// Name of the port (optional)
    std::string name;
};

} // namespace Kernel
@@ -1,170 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <tuple>
#include <utility>

#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/core_timing.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/memory.h"

namespace Kernel {

ServerSession::ServerSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}

ServerSession::~ServerSession() {
    kernel.ReleaseServiceThread(service_thread);
}

ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kernel,
                                                                std::shared_ptr<Session> parent,
                                                                std::string name) {
    std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)};

    session->name = std::move(name);
    session->parent = std::move(parent);
    session->service_thread = kernel.CreateServiceThread(session->name);

    return MakeResult(std::move(session));
}

bool ServerSession::IsSignaled() const {
    // Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
    if (!parent->Client()) {
        return true;
    }

    // Wait if we have no pending requests, or if we're currently handling a request.
    return !pending_requesting_threads.empty() && currently_handling == nullptr;
}

void ServerSession::ClientDisconnected() {
    // We keep a shared pointer to the hle handler to keep it alive throughout
    // the call to ClientDisconnected, as ClientDisconnected invalidates the
    // hle_handler member itself during the course of the function executing.
    std::shared_ptr<SessionRequestHandler> handler = hle_handler;
    if (handler) {
        // Note that after this returns, this server session's hle_handler is
        // invalidated (set to null).
        handler->ClientDisconnected(SharedFrom(this));
    }

    // Clean up the list of client threads with pending requests; they are unneeded now that
    // the client endpoint is closed.
    pending_requesting_threads.clear();
    currently_handling = nullptr;
}

void ServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) {
    domain_request_handlers.push_back(std::move(handler));
}

std::size_t ServerSession::NumDomainRequestHandlers() const {
    return domain_request_handlers.size();
}

ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
    if (!context.HasDomainMessageHeader()) {
        return RESULT_SUCCESS;
    }

    // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
    context.SetDomainRequestHandlers(domain_request_handlers);

    // If there is a DomainMessageHeader, then this is CommandType "Request"
    const auto& domain_message_header = context.GetDomainMessageHeader();
    const u32 object_id{domain_message_header.object_id};
    switch (domain_message_header.command) {
    case IPC::DomainMessageHeader::CommandType::SendMessage:
        if (object_id > domain_request_handlers.size()) {
            LOG_CRITICAL(IPC,
                         "object_id {} is too big! This probably means a recent service call "
                         "to {} needed to return a new interface!",
                         object_id, name);
            UNREACHABLE();
            return RESULT_SUCCESS; // Ignore error if asserts are off
        }
        return domain_request_handlers[object_id - 1]->HandleSyncRequest(context);

    case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
        LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);

        domain_request_handlers[object_id - 1] = nullptr;

        IPC::ResponseBuilder rb{context, 2};
        rb.Push(RESULT_SUCCESS);
        return RESULT_SUCCESS;
    }
    }

    LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
    ASSERT(false);
    return RESULT_SUCCESS;
}

ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<KThread> thread,
                                           Core::Memory::Memory& memory) {
    u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
    auto context =
        std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), std::move(thread));

    context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);

    if (auto strong_ptr = service_thread.lock()) {
        strong_ptr->QueueSyncRequest(*this, std::move(context));
        return RESULT_SUCCESS;
    }

    return RESULT_SUCCESS;
}

ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
    ResultCode result = RESULT_SUCCESS;
    // If the session has been converted to a domain, handle the domain request
    if (IsDomain() && context.HasDomainMessageHeader()) {
        result = HandleDomainSyncRequest(context);
        // If there is no domain header, the regular session handler is used
    } else if (hle_handler != nullptr) {
        // If this ServerSession has an associated HLE handler, forward the request to it.
        result = hle_handler->HandleSyncRequest(context);
    }

    if (convert_to_domain) {
        ASSERT_MSG(IsSession(), "ServerSession is already a domain instance.");
        domain_request_handlers = {hle_handler};
        convert_to_domain = false;
    }

    // Some service requests require the thread to block
    {
        KScopedSchedulerLock lock(kernel);
        if (!context.IsThreadWaiting()) {
            context.GetThread().Wakeup();
            context.GetThread().SetSyncedObject(nullptr, result);
        }
    }

    return result;
}

ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<KThread> thread,
                                            Core::Memory::Memory& memory,
                                            Core::Timing::CoreTiming& core_timing) {
    return QueueSyncRequest(std::move(thread), memory);
}

} // namespace Kernel
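// Illustrative note (not from the original file): domain object IDs in the message
// header are 1-based, which is why the dispatch above indexes with `object_id - 1`.
// A hypothetical lookup helper mirroring that convention:
namespace {
std::shared_ptr<Kernel::SessionRequestHandler> LookupDomainHandler(
    const std::vector<std::shared_ptr<Kernel::SessionRequestHandler>>& handlers,
    u32 object_id) {
    // 0 is not a valid domain object ID, and valid IDs run from 1 to handlers.size().
    if (object_id == 0 || object_id > handlers.size()) {
        return nullptr;
    }
    return handlers[object_id - 1];
}
} // namespace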
@@ -1,171 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "common/threadsafe_queue.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/service_thread.h"
#include "core/hle/result.h"

namespace Core::Memory {
class Memory;
}

namespace Core::Timing {
class CoreTiming;
struct EventType;
} // namespace Core::Timing

namespace Kernel {

class HLERequestContext;
class KernelCore;
class Session;
class SessionRequestHandler;
class KThread;

/**
 * Kernel object representing the server endpoint of an IPC session. Sessions are the basic CTR-OS
 * primitive for communication between different processes, and are used to implement service calls
 * to the various system services.
 *
 * To make a service call, the client must write the command header and parameters to the buffer
 * located at offset 0x80 of the TLS (Thread-Local Storage) area, then execute a SendSyncRequest
 * SVC call with its ClientSession handle. The kernel will read the command header, using it to
 * marshall the parameters to the process at the server endpoint of the session.
 * After the server replies to the request, the response is marshalled back to the caller's
 * TLS buffer and control is transferred back to it.
 */
class ServerSession final : public KSynchronizationObject {
    friend class ServiceThread;

public:
    explicit ServerSession(KernelCore& kernel);
    ~ServerSession() override;

    friend class Session;

    static ResultVal<std::shared_ptr<ServerSession>> Create(KernelCore& kernel,
                                                            std::shared_ptr<Session> parent,
                                                            std::string name = "Unknown");

    std::string GetTypeName() const override {
        return "ServerSession";
    }

    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ServerSession;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    Session* GetParent() {
        return parent.get();
    }

    const Session* GetParent() const {
        return parent.get();
    }

    /**
     * Sets the HLE handler for the session. This handler will be called to service IPC requests
     * instead of the regular IPC machinery. (The regular IPC machinery is currently not
     * implemented.)
     */
    void SetHleHandler(std::shared_ptr<SessionRequestHandler> hle_handler_) {
        hle_handler = std::move(hle_handler_);
    }

    /**
     * Handle a sync request from the emulated application.
     *
     * @param thread Thread that initiated the request.
     * @param memory Memory context to handle the sync request under.
     * @param core_timing Core timing context to schedule the request event under.
     *
     * @returns ResultCode from the operation.
     */
    ResultCode HandleSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
                                 Core::Timing::CoreTiming& core_timing);

    /// Called when a client disconnection occurs.
    void ClientDisconnected();

    /// Adds a new domain request handler to the collection of request handlers within
    /// this ServerSession instance.
    void AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler);

    /// Retrieves the total number of domain request handlers that have been
    /// appended to this ServerSession instance.
    std::size_t NumDomainRequestHandlers() const;

    /// Returns true if the session has been converted to a domain, otherwise false.
    bool IsDomain() const {
        return !IsSession();
    }

    /// Returns true if this session has not been converted to a domain, otherwise false.
    bool IsSession() const {
        return domain_request_handlers.empty();
    }

    /// Converts the session to a domain at the end of the current command
    void ConvertToDomain() {
        convert_to_domain = true;
    }

    bool IsSignaled() const override;

    void Finalize() override {}

private:
    /// Queues a sync request from the emulated application.
    ResultCode QueueSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory);

    /// Completes a sync request from the emulated application.
    ResultCode CompleteSyncRequest(HLERequestContext& context);

    /// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing
    /// an object handle.
    ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context);

    /// The parent session, which links to the client endpoint.
    std::shared_ptr<Session> parent;

    /// This session's HLE request handler (applicable when not a domain)
    std::shared_ptr<SessionRequestHandler> hle_handler;

    /// This is the list of domain request handlers (after conversion to a domain)
    std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers;

    /// List of threads that are pending a response after a sync request. This list is processed in
    /// a LIFO manner, thus, the last request will be dispatched first.
    /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
    std::vector<std::shared_ptr<KThread>> pending_requesting_threads;

    /// Thread whose request is currently being handled. A request is considered "handled" when a
    /// response is sent via svcReplyAndReceive.
    /// TODO(Subv): Find a better name for this.
    std::shared_ptr<KThread> currently_handling;

    /// When set to true, converts the session to a domain at the end of the command
    bool convert_to_domain{};

    /// The name of this session (optional)
    std::string name;

    /// Thread to dispatch service requests
    std::weak_ptr<ServiceThread> service_thread;
};

} // namespace Kernel
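// Illustrative note (not from the original file): conversion to a domain is deferred.
// ConvertToDomain() only sets `convert_to_domain`; the actual conversion happens in
// CompleteSyncRequest(), after the current command finishes:
//
//     server_session->ConvertToDomain();  // request conversion
//     // ... the in-flight IPC command completes ...
//     // CompleteSyncRequest() then moves hle_handler into domain_request_handlers,
//     // after which IsDomain() is true and IsSession() is false.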
@@ -1,41 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"

namespace Kernel {

Session::Session(KernelCore& kernel) : KSynchronizationObject{kernel} {}
Session::~Session() {
    // Release the session resource that was reserved when the Session pair was created.
    kernel.GetSystemResourceLimit()->Release(LimitableResource::Sessions, 1);
}

Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
    // Reserve a new session from the resource limit.
    KScopedResourceReservation session_reservation(kernel.GetSystemResourceLimit(),
                                                   LimitableResource::Sessions);
    ASSERT(session_reservation.Succeeded());
    auto session{std::make_shared<Session>(kernel)};
    auto client_session{Kernel::ClientSession::Create(kernel, session, name + "_Client").Unwrap()};
    auto server_session{Kernel::ServerSession::Create(kernel, session, name + "_Server").Unwrap()};

    session->name = std::move(name);
    session->client = client_session;
    session->server = server_session;

    session_reservation.Commit();
    return std::make_pair(std::move(client_session), std::move(server_session));
}

bool Session::IsSignaled() const {
    UNIMPLEMENTED();
    return true;
}

} // namespace Kernel
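// Illustrative usage sketch (not from the original file): creating a session pair and
// routing the server end to a port. The names used are assumptions for the example.
namespace {
void SessionPairExample(Kernel::KernelCore& kernel,
                        std::shared_ptr<Kernel::ServerPort> server_port) {
    auto [client_session, server_session] = Kernel::Session::Create(kernel, "Example");
    // The server endpoint is what the port hands to the service code via Accept().
    server_port->AppendPendingSession(std::move(server_session));
    // The client endpoint would be returned to the requesting process as a handle.
    (void)client_session;
}
} // namespace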
@@ -1,64 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <string>
#include <utility>

#include "core/hle/kernel/k_synchronization_object.h"

namespace Kernel {

class ClientSession;
class ServerSession;

/**
 * Parent structure to link the client and server endpoints of a session with their associated
 * client port.
 */
class Session final : public KSynchronizationObject {
public:
    explicit Session(KernelCore& kernel);
    ~Session() override;

    using SessionPair = std::pair<std::shared_ptr<ClientSession>, std::shared_ptr<ServerSession>>;

    static SessionPair Create(KernelCore& kernel, std::string name = "Unknown");

    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::Session;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    bool IsSignaled() const override;

    void Finalize() override {}

    std::shared_ptr<ClientSession> Client() {
        if (auto result{client.lock()}) {
            return result;
        }
        return {};
    }

    std::shared_ptr<ServerSession> Server() {
        if (auto result{server.lock()}) {
            return result;
        }
        return {};
    }

private:
    std::string name;
    std::weak_ptr<ClientSession> client;
    std::weak_ptr<ServerSession> server;
};

} // namespace Kernel
@@ -1,66 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "core/core.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/shared_memory.h"

namespace Kernel {

SharedMemory::SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory)
    : Object{kernel}, device_memory{device_memory} {}

SharedMemory::~SharedMemory() {
    kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size);
}

std::shared_ptr<SharedMemory> SharedMemory::Create(
    KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
    Memory::PageLinkedList&& page_list, Memory::MemoryPermission owner_permission,
    Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size,
    std::string name) {

    const auto resource_limit = kernel.GetSystemResourceLimit();
    KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
                                                  size);
    ASSERT(memory_reservation.Succeeded());

    std::shared_ptr<SharedMemory> shared_memory{
        std::make_shared<SharedMemory>(kernel, device_memory)};

    shared_memory->owner_process = owner_process;
    shared_memory->page_list = std::move(page_list);
    shared_memory->owner_permission = owner_permission;
    shared_memory->user_permission = user_permission;
    shared_memory->physical_address = physical_address;
    shared_memory->size = size;
    shared_memory->name = name;

    memory_reservation.Commit();
    return shared_memory;
}

ResultCode SharedMemory::Map(Process& target_process, VAddr address, std::size_t size,
                             Memory::MemoryPermission permissions) {
    const u64 page_count{(size + Memory::PageSize - 1) / Memory::PageSize};

    if (page_list.GetNumPages() != page_count) {
        UNIMPLEMENTED_MSG("Page count does not match");
    }

    const Memory::MemoryPermission expected =
        &target_process == owner_process ? owner_permission : user_permission;

    if (permissions != expected) {
        UNIMPLEMENTED_MSG("Permission does not match");
    }

    return target_process.PageTable().MapPages(address, page_list, Memory::MemoryState::Shared,
                                               permissions);
}

} // namespace Kernel
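// Illustrative note (not from the original file): the page count computed in Map() is
// the usual round-up division. Assuming Memory::PageSize is 0x1000, a 0x1800-byte
// request needs two pages:
static_assert((0x1800 + 0x1000 - 1) / 0x1000 == 2, "round-up page count example");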
@@ -1,87 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <string>

#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/memory/page_linked_list.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/result.h"

namespace Kernel {

class KernelCore;

class SharedMemory final : public Object {
public:
    explicit SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory);
    ~SharedMemory() override;

    static std::shared_ptr<SharedMemory> Create(
        KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
        Memory::PageLinkedList&& page_list, Memory::MemoryPermission owner_permission,
        Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size,
        std::string name);

    std::string GetTypeName() const override {
        return "SharedMemory";
    }

    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::SharedMemory;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /**
     * Maps a shared memory block to an address in the target process' address space
     * @param target_process Process on which to map the memory block
     * @param address Address in system memory to map shared memory block to
     * @param size Size of the shared memory block to map
     * @param permissions Memory block map permissions (specified by SVC field)
     */
    ResultCode Map(Process& target_process, VAddr address, std::size_t size,
                   Memory::MemoryPermission permissions);

    /**
     * Gets a pointer to the shared memory block
     * @param offset Offset from the start of the shared memory block to get pointer
     * @return A pointer to the shared memory block from the specified offset
     */
    u8* GetPointer(std::size_t offset = 0) {
        return device_memory.GetPointer(physical_address + offset);
    }

    /**
     * Gets a pointer to the shared memory block
     * @param offset Offset from the start of the shared memory block to get pointer
     * @return A pointer to the shared memory block from the specified offset
     */
    const u8* GetPointer(std::size_t offset = 0) const {
        return device_memory.GetPointer(physical_address + offset);
    }

    void Finalize() override {}

private:
    Core::DeviceMemory& device_memory;
    Process* owner_process{};
    Memory::PageLinkedList page_list;
    Memory::MemoryPermission owner_permission{};
    Memory::MemoryPermission user_permission{};
    PAddr physical_address{};
    std::size_t size{};
    std::string name;
};

} // namespace Kernel
@@ -1,116 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/synchronization.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"

namespace Kernel {

Synchronization::Synchronization(Core::System& system) : system{system} {}

void Synchronization::SignalObject(SynchronizationObject& obj) const {
    auto& kernel = system.Kernel();
    KScopedSchedulerLock lock(kernel);
    if (obj.IsSignaled()) {
        for (auto thread : obj.GetWaitingThreads()) {
            if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
                if (thread->GetStatus() != ThreadStatus::WaitHLEEvent) {
                    ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);
                    ASSERT(thread->IsWaitingSync());
                }
                thread->SetSynchronizationResults(&obj, RESULT_SUCCESS);
                thread->ResumeFromWait();
            }
        }
        obj.ClearWaitingThreads();
    }
}

std::pair<ResultCode, Handle> Synchronization::WaitFor(
    std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
    auto& kernel = system.Kernel();
    auto* const thread = kernel.CurrentScheduler()->GetCurrentThread();
    Handle event_handle = InvalidHandle;
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
        const auto itr =
            std::find_if(sync_objects.begin(), sync_objects.end(),
                         [thread](const std::shared_ptr<SynchronizationObject>& object) {
                             return object->IsSignaled();
                         });

        if (itr != sync_objects.end()) {
            // We found a ready object, acquire it and set the result value
            SynchronizationObject* object = itr->get();
            object->Acquire(thread);
            const u32 index = static_cast<u32>(std::distance(sync_objects.begin(), itr));
            lock.CancelSleep();
            return {RESULT_SUCCESS, index};
        }

        if (nano_seconds == 0) {
            lock.CancelSleep();
            return {RESULT_TIMEOUT, InvalidHandle};
        }

        if (thread->IsPendingTermination()) {
            lock.CancelSleep();
            return {ERR_THREAD_TERMINATING, InvalidHandle};
        }

        if (thread->IsSyncCancelled()) {
            thread->SetSyncCancelled(false);
            lock.CancelSleep();
            return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle};
        }

        for (auto& object : sync_objects) {
            object->AddWaitingThread(SharedFrom(thread));
        }

        thread->SetSynchronizationObjects(&sync_objects);
        thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
        thread->SetStatus(ThreadStatus::WaitSynch);
        thread->SetWaitingSync(true);
    }
    thread->SetWaitingSync(false);

    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }

    {
        KScopedSchedulerLock lock(kernel);
        ResultCode signaling_result = thread->GetSignalingResult();
        SynchronizationObject* signaling_object = thread->GetSignalingObject();
        thread->SetSynchronizationObjects(nullptr);
        auto shared_thread = SharedFrom(thread);
        for (auto& obj : sync_objects) {
            obj->RemoveWaitingThread(shared_thread);
        }
        if (signaling_object != nullptr) {
            const auto itr = std::find_if(
                sync_objects.begin(), sync_objects.end(),
                [signaling_object](const std::shared_ptr<SynchronizationObject>& object) {
                    return object.get() == signaling_object;
                });
            ASSERT(itr != sync_objects.end());
            signaling_object->Acquire(thread);
            const u32 index = static_cast<u32>(std::distance(sync_objects.begin(), itr));
            return {signaling_result, index};
        }
        return {signaling_result, -1};
    }
}

} // namespace Kernel
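// Illustrative usage sketch (not from the original file): how an SVC wrapper might
// call WaitFor(). The timeout value is an assumption for the example; a timeout of 0
// polls and returns RESULT_TIMEOUT immediately, while the result pair carries the
// index of the signaled object on success.
namespace {
std::pair<ResultCode, Kernel::Handle> WaitForExample(
    Core::System& system,
    std::vector<std::shared_ptr<Kernel::SynchronizationObject>>& objects) {
    constexpr s64 one_ms_in_ns = 1'000'000;
    return system.Kernel().Synchronization().WaitFor(objects, one_ms_in_ns);
}
} // namespace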
@@ -1,44 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <utility>
#include <vector>

#include "core/hle/kernel/object.h"
#include "core/hle/result.h"

namespace Core {
class System;
} // namespace Core

namespace Kernel {

class SynchronizationObject;

/**
 * The 'Synchronization' class is an interface for handling synchronization methods
 * used by Synchronization objects and synchronization SVCs. This centralizes the
 * processing of such operations.
 */
class Synchronization {
public:
    explicit Synchronization(Core::System& system);

    /// Signals a synchronization object, waking up all its waiting threads
    void SignalObject(SynchronizationObject& obj) const;

    /// Checks whether waiting on any of the sync_objects is necessary. If one of them is
    /// already signaled, returns Success and the handle index of that object. Otherwise,
    /// the current thread is put to sleep until either nano_seconds elapse or one of the
    /// objects is signaled.
    std::pair<ResultCode, Handle> WaitFor(
        std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds);

private:
    Core::System& system;
};
} // namespace Kernel
@@ -1,49 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/synchronization.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/kernel/thread.h"

namespace Kernel {

SynchronizationObject::SynchronizationObject(KernelCore& kernel) : Object{kernel} {}
SynchronizationObject::~SynchronizationObject() = default;

void SynchronizationObject::Signal() {
    kernel.Synchronization().SignalObject(*this);
}

void SynchronizationObject::AddWaitingThread(std::shared_ptr<Thread> thread) {
    auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
    if (itr == waiting_threads.end())
        waiting_threads.push_back(std::move(thread));
}

void SynchronizationObject::RemoveWaitingThread(std::shared_ptr<Thread> thread) {
    auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
    // If a thread passed multiple handles to the same object,
    // the kernel might attempt to remove the thread from the object's
    // waiting threads list multiple times.
    if (itr != waiting_threads.end())
        waiting_threads.erase(itr);
}

void SynchronizationObject::ClearWaitingThreads() {
    waiting_threads.clear();
}

const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const {
    return waiting_threads;
}

} // namespace Kernel
@@ -1,77 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <atomic>
#include <memory>
#include <vector>

#include "core/hle/kernel/object.h"

namespace Kernel {

class KernelCore;
class Synchronization;
class Thread;

/// Class that represents a Kernel object that a thread can be waiting on
class SynchronizationObject : public Object {
public:
    explicit SynchronizationObject(KernelCore& kernel);
    ~SynchronizationObject() override;

    /**
     * Check if the specified thread should wait until the object is available
     * @param thread The thread about which we're deciding.
     * @return True if the current thread should wait due to this object being unavailable
     */
    virtual bool ShouldWait(const Thread* thread) const = 0;

    /// Acquire/lock the object for the specified thread if it is available
    virtual void Acquire(Thread* thread) = 0;

    /// Signal this object
    virtual void Signal();

    virtual bool IsSignaled() const {
        return is_signaled;
    }

    /**
     * Add a thread to wait on this object
     * @param thread Pointer to thread to add
     */
    void AddWaitingThread(std::shared_ptr<Thread> thread);

    /**
     * Removes a thread from waiting on this object (e.g. if it was resumed already)
     * @param thread Pointer to thread to remove
     */
    void RemoveWaitingThread(std::shared_ptr<Thread> thread);

    /// Get a const reference to the waiting threads list for debug use
    const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const;

    void ClearWaitingThreads();

protected:
    std::atomic_bool is_signaled{}; // Tells if this sync object is signaled

private:
    /// Threads waiting for this object to become available
    std::vector<std::shared_ptr<Thread>> waiting_threads;
};

// Specialization of DynamicObjectCast for SynchronizationObjects
template <>
inline std::shared_ptr<SynchronizationObject> DynamicObjectCast<SynchronizationObject>(
    std::shared_ptr<Object> object) {
    if (object != nullptr && object->IsWaitable()) {
        return std::static_pointer_cast<SynchronizationObject>(object);
    }
    return nullptr;
}

} // namespace Kernel
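// Illustrative sketch (not from the original file): the smallest useful subclass, a
// manually-signaled event. The remaining Object plumbing (GetTypeName/GetHandleType/
// Finalize/etc.) is omitted, so this is a shape sketch rather than a complete kernel
// object.
//
//     class ExampleEvent : public Kernel::SynchronizationObject {
//     public:
//         explicit ExampleEvent(Kernel::KernelCore& kernel)
//             : SynchronizationObject{kernel} {}
//
//         bool ShouldWait(const Kernel::Thread*) const override {
//             return !is_signaled; // threads wait while the event is unsignaled
//         }
//
//         void Acquire(Kernel::Thread*) override {
//             // A sticky event keeps its signal when acquired.
//         }
//
//         void Set() {
//             is_signaled = true;
//             Signal(); // wakes waiters via Synchronization::SignalObject()
//         }
//     };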
@@ -1,460 +0,0 @@
// Copyright 2014 Citra Emulator Project / PPSSPP Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <cinttypes>
#include <optional>
#include <vector>

#include "common/assert.h"
#include "common/common_types.h"
#include "common/fiber.h"
#include "common/logging/log.h"
#include "common/thread_queue_list.h"
#include "core/core.h"
#include "core/cpu_manager.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_layout.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"

#ifdef ARCHITECTURE_x86_64
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
#endif

namespace Kernel {

bool Thread::IsSignaled() const {
    return signaled;
}

Thread::Thread(KernelCore& kernel) : KSynchronizationObject{kernel} {}
Thread::~Thread() = default;

void Thread::Stop() {
    {
        KScopedSchedulerLock lock(kernel);
        SetState(ThreadState::Terminated);
        signaled = true;
        NotifyAvailable();
        kernel.GlobalHandleTable().Close(global_handle);

        if (owner_process) {
            owner_process->UnregisterThread(this);

            // Mark the TLS slot in the thread's page as free.
            owner_process->FreeTLSRegion(tls_address);
        }
        has_exited = true;
    }
    global_handle = 0;
}

void Thread::Wakeup() {
    KScopedSchedulerLock lock(kernel);
    SetState(ThreadState::Runnable);
}

ResultCode Thread::Start() {
    KScopedSchedulerLock lock(kernel);
    SetState(ThreadState::Runnable);
    return RESULT_SUCCESS;
}

void Thread::CancelWait() {
    KScopedSchedulerLock lock(kernel);
    if (GetState() != ThreadState::Waiting || !is_cancellable) {
        is_sync_cancelled = true;
        return;
    }
    // TODO(Blinkhawk): Implement cancel of server session
    is_sync_cancelled = false;
    SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
    SetState(ThreadState::Runnable);
}

static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
                                 u32 entry_point, u32 arg) {
    context = {};
    context.cpu_registers[0] = arg;
    context.cpu_registers[15] = entry_point;
    context.cpu_registers[13] = stack_top;
}

static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top,
                                 VAddr entry_point, u64 arg) {
    context = {};
    context.cpu_registers[0] = arg;
    context.pc = entry_point;
    context.sp = stack_top;
    // TODO(merry): Perform a hardware test to determine the below value.
    context.fpcr = 0;
}

std::shared_ptr<Common::Fiber>& Thread::GetHostContext() {
    return host_context;
}

ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
                                                  std::string name, VAddr entry_point, u32 priority,
                                                  u64 arg, s32 processor_id, VAddr stack_top,
                                                  Process* owner_process) {
    std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
    return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
                  owner_process, std::move(init_func), init_func_parameter);
}

ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
                                                  std::string name, VAddr entry_point, u32 priority,
                                                  u64 arg, s32 processor_id, VAddr stack_top,
                                                  Process* owner_process,
                                                  std::function<void(void*)>&& thread_start_func,
                                                  void* thread_start_parameter) {
    auto& kernel = system.Kernel();
    // Check if priority is in range. Lowest priority -> highest priority id.
    if (priority > THREADPRIO_LOWEST && ((type_flags & THREADTYPE_IDLE) == 0)) {
        LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority);
        return ERR_INVALID_THREAD_PRIORITY;
    }

    if (processor_id > THREADPROCESSORID_MAX) {
        LOG_ERROR(Kernel_SVC, "Invalid processor id: {}", processor_id);
        return ERR_INVALID_PROCESSOR_ID;
    }

    if (owner_process) {
        if (!system.Memory().IsValidVirtualAddress(*owner_process, entry_point)) {
            LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
            // TODO (bunnei): Find the correct error code to use here
            return RESULT_UNKNOWN;
        }
    }

    std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);

    thread->thread_id = kernel.CreateNewThreadID();
    thread->thread_state = ThreadState::Initialized;
    thread->entry_point = entry_point;
    thread->stack_top = stack_top;
    thread->disable_count = 1;
    thread->tpidr_el0 = 0;
    thread->current_priority = priority;
    thread->base_priority = priority;
    thread->lock_owner = nullptr;
    thread->schedule_count = -1;
    thread->last_scheduled_tick = 0;
    thread->processor_id = processor_id;
    thread->ideal_core = processor_id;
    thread->affinity_mask.SetAffinity(processor_id, true);
    thread->name = std::move(name);
    thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
    thread->owner_process = owner_process;
    thread->type = type_flags;
    thread->signaled = false;
    if ((type_flags & THREADTYPE_IDLE) == 0) {
        auto& scheduler = kernel.GlobalSchedulerContext();
        scheduler.AddThread(thread);
    }
    if (owner_process) {
        thread->tls_address = thread->owner_process->CreateTLSRegion();
        thread->owner_process->RegisterThread(thread.get());
    } else {
        thread->tls_address = 0;
    }

    // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
    // to initialize the context
    if ((type_flags & THREADTYPE_HLE) == 0) {
        ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top),
                             static_cast<u32>(entry_point), static_cast<u32>(arg));
        ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
    }
    thread->host_context =
        std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);

    return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
}

void Thread::SetBasePriority(u32 priority) {
    ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
               "Invalid priority value.");

    KScopedSchedulerLock lock(kernel);

    // Change our base priority.
    base_priority = priority;

    // Perform a priority restoration.
    RestorePriority(kernel, this);
}

void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) {
    signaling_object = object;
    signaling_result = result;
}

VAddr Thread::GetCommandBufferAddress() const {
    // Offset from the start of TLS at which the IPC command buffer begins.
    constexpr u64 command_header_offset = 0x80;
    return GetTLSAddress() + command_header_offset;
}

void Thread::SetState(ThreadState state) {
    KScopedSchedulerLock sl(kernel);

    // Clear debugging state
    SetMutexWaitAddressForDebugging({});
    SetWaitReasonForDebugging({});

    const ThreadState old_state = thread_state;
    thread_state =
        static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
    if (thread_state != old_state) {
        KScheduler::OnThreadStateChanged(kernel, this, old_state);
    }
}

void Thread::AddWaiterImpl(Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Find the right spot to insert the waiter.
    auto it = waiter_list.begin();
    while (it != waiter_list.end()) {
        if (it->GetPriority() > thread->GetPriority()) {
            break;
        }
        it++;
    }

    // Keep track of how many kernel waiters we have.
    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
        ASSERT((num_kernel_waiters++) >= 0);
    }

    // Insert the waiter.
    waiter_list.insert(it, *thread);
    thread->SetLockOwner(this);
}

void Thread::RemoveWaiterImpl(Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Keep track of how many kernel waiters we have.
    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
        ASSERT((num_kernel_waiters--) > 0);
    }

    // Remove the waiter.
    waiter_list.erase(waiter_list.iterator_to(*thread));
    thread->SetLockOwner(nullptr);
}

void Thread::RestorePriority(KernelCore& kernel, Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    while (true) {
        // We want to inherit priority where possible.
        s32 new_priority = thread->GetBasePriority();
        if (thread->HasWaiters()) {
            new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
        }

        // If the priority we would inherit is not different from ours, don't do anything.
        if (new_priority == thread->GetPriority()) {
            return;
        }

        // Ensure we don't violate condition variable red black tree invariants.
        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
            BeforeUpdatePriority(kernel, cv_tree, thread);
        }

        // Change the priority.
        const s32 old_priority = thread->GetPriority();
        thread->SetPriority(new_priority);

        // Restore the condition variable, if relevant.
        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
            AfterUpdatePriority(kernel, cv_tree, thread);
        }

        // Update the scheduler.
        KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);

        // Keep the lock owner up to date.
        Thread* lock_owner = thread->GetLockOwner();
        if (lock_owner == nullptr) {
            return;
        }

        // Update the thread in the lock owner's sorted list, and continue inheriting.
        lock_owner->RemoveWaiterImpl(thread);
        lock_owner->AddWaiterImpl(thread);
        thread = lock_owner;
    }
}

void Thread::AddWaiter(Thread* thread) {
    AddWaiterImpl(thread);
    RestorePriority(kernel, this);
}

void Thread::RemoveWaiter(Thread* thread) {
    RemoveWaiterImpl(thread);
    RestorePriority(kernel, this);
}

Thread* Thread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    s32 num_waiters{};
    Thread* next_lock_owner{};
    auto it = waiter_list.begin();
    while (it != waiter_list.end()) {
        if (it->GetAddressKey() == key) {
            Thread* thread = std::addressof(*it);

            // Keep track of how many kernel waiters we have.
            if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
                ASSERT((num_kernel_waiters--) > 0);
            }
            it = waiter_list.erase(it);

            // Update the next lock owner.
            if (next_lock_owner == nullptr) {
                next_lock_owner = thread;
                next_lock_owner->SetLockOwner(nullptr);
            } else {
                next_lock_owner->AddWaiterImpl(thread);
            }
            num_waiters++;
        } else {
            it++;
        }
    }

    // Do priority updates, if we have a next owner.
    if (next_lock_owner) {
        RestorePriority(kernel, this);
        RestorePriority(kernel, next_lock_owner);
    }

    // Return output.
    *out_num_waiters = num_waiters;
    return next_lock_owner;
}

ResultCode Thread::SetActivity(ThreadActivity value) {
    KScopedSchedulerLock lock(kernel);

    auto sched_status = GetState();

    if (sched_status != ThreadState::Runnable && sched_status != ThreadState::Waiting) {
        return ERR_INVALID_STATE;
    }

    if (IsTerminationRequested()) {
        return RESULT_SUCCESS;
    }

    if (value == ThreadActivity::Paused) {
        if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) != 0) {
            return ERR_INVALID_STATE;
        }
        AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
    } else {
        if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) == 0) {
            return ERR_INVALID_STATE;
        }
        RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
    }
    return RESULT_SUCCESS;
}

ResultCode Thread::Sleep(s64 nanoseconds) {
    Handle event_handle{};
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
        SetState(ThreadState::Waiting);
        SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
    }

    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }
    return RESULT_SUCCESS;
}

void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
    const auto old_state = GetRawState();
    pausing_state |= static_cast<u32>(flag);
    const auto base_scheduling = GetState();
    thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
    KScheduler::OnThreadStateChanged(kernel, this, old_state);
}

void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
    const auto old_state = GetRawState();
    pausing_state &= ~static_cast<u32>(flag);
    const auto base_scheduling = GetState();
    thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
    KScheduler::OnThreadStateChanged(kernel, this, old_state);
}

ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
    KScopedSchedulerLock lock(kernel);
    const auto HighestSetCore = [](u64 mask, u32 max_cores) {
        for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
            if (((mask >> core) & 1) != 0) {
                return core;
            }
        }
        return -1;
    };

    const bool use_override = affinity_override_count != 0;
    if (new_core == THREADPROCESSORID_DONT_UPDATE) {
        new_core = use_override ? ideal_core_override : ideal_core;
        if ((new_affinity_mask & (1ULL << new_core)) == 0) {
            LOG_ERROR(Kernel, "New affinity mask is incorrect! new_core={}, new_affinity_mask={}",
                      new_core, new_affinity_mask);
            return ERR_INVALID_COMBINATION;
        }
    }
    if (use_override) {
        ideal_core_override = new_core;
    } else {
        const auto old_affinity_mask = affinity_mask;
        affinity_mask.SetAffinityMask(new_affinity_mask);
        ideal_core = new_core;
        if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) {
            const s32 old_core = processor_id;
            if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) {
                if (static_cast<s32>(ideal_core) < 0) {
                    processor_id = HighestSetCore(affinity_mask.GetAffinityMask(),
                                                  Core::Hardware::NUM_CPU_CORES);
                } else {
                    processor_id = ideal_core;
                }
            }
            KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core);
        }
    }
    return RESULT_SUCCESS;
}

} // namespace Kernel
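// Illustrative note (not from the original file): a worked example of the
// RestorePriority() loop above, where a lower value means a higher priority.
// Suppose thread A (base priority 40) holds a lock and thread B (priority 20)
// starts waiting on it:
//   - A has waiters, so new_priority = min(40, 20) = 20.
//   - A's priority changes from 40 to 20 and the scheduler is notified.
//   - If A is itself waiting on a lock held by some thread C, the loop continues
//     with C, propagating the inherited priority up the chain.
// When B stops waiting, the same walk runs again and A falls back to its base
// priority of 40.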
@@ -1,782 +0,0 @@
// Copyright 2014 Citra Emulator Project / PPSSPP Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <atomic>
#include <concepts>
#include <functional>
#include <memory>
#include <span>
#include <string>
#include <utility>
#include <vector>

#include <boost/intrusive/list.hpp>

#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_affinity_mask.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/result.h"

namespace Common {
class Fiber;
}

namespace Core {
class ARM_Interface;
class System;
} // namespace Core

namespace Kernel {

class GlobalSchedulerContext;
class KernelCore;
class Process;
class KScheduler;

enum ThreadPriority : u32 {
    THREADPRIO_HIGHEST = 0,            ///< Highest thread priority
    THREADPRIO_MAX_CORE_MIGRATION = 2, ///< Highest priority for a core migration
    THREADPRIO_USERLAND_MAX = 24,      ///< Highest thread priority for userland apps
    THREADPRIO_DEFAULT = 44,           ///< Default thread priority for userland apps
    THREADPRIO_LOWEST = 63,            ///< Lowest thread priority
    THREADPRIO_COUNT = 64,             ///< Total number of possible thread priorities.
};

enum ThreadType : u32 {
    THREADTYPE_USER = 0x1,
    THREADTYPE_KERNEL = 0x2,
    THREADTYPE_HLE = 0x4,
    THREADTYPE_IDLE = 0x8,
    THREADTYPE_SUSPEND = 0x10,
};

enum ThreadProcessorId : s32 {
    /// Indicates that no particular processor core is preferred.
    THREADPROCESSORID_DONT_CARE = -1,

    /// Run thread on the ideal core specified by the process.
    THREADPROCESSORID_IDEAL = -2,

    /// Indicates that the preferred processor ID shouldn't be updated in
    /// a core mask setting operation.
    THREADPROCESSORID_DONT_UPDATE = -3,

    THREADPROCESSORID_0 = 0,   ///< Run thread on core 0
    THREADPROCESSORID_1 = 1,   ///< Run thread on core 1
    THREADPROCESSORID_2 = 2,   ///< Run thread on core 2
    THREADPROCESSORID_3 = 3,   ///< Run thread on core 3
    THREADPROCESSORID_MAX = 4, ///< Processor ID must be less than this

    /// Allowed CPU mask
    THREADPROCESSORID_DEFAULT_MASK = (1 << THREADPROCESSORID_0) | (1 << THREADPROCESSORID_1) |
                                     (1 << THREADPROCESSORID_2) | (1 << THREADPROCESSORID_3)
};
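
// The low bits of ThreadState hold the scheduling state proper; bits at and
// above SuspendShift are suspend flags that can be OR'd on top of it, and
// SuspendFlagMask selects all of them at once.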
enum class ThreadState : u16 {
    Initialized = 0,
    Waiting = 1,
    Runnable = 2,
    Terminated = 3,

    SuspendShift = 4,
    Mask = (1 << SuspendShift) - 1,

    ProcessSuspended = (1 << (0 + SuspendShift)),
    ThreadSuspended = (1 << (1 + SuspendShift)),
    DebugSuspended = (1 << (2 + SuspendShift)),
    BacktraceSuspended = (1 << (3 + SuspendShift)),
    InitSuspended = (1 << (4 + SuspendShift)),

    SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,
};
DECLARE_ENUM_FLAG_OPERATORS(ThreadState);

enum class ThreadWakeupReason {
    Signal, // The thread was woken up by WakeupAllWaitingThreads due to an object signal.
    Timeout // The thread was woken up due to a wait timeout.
};

enum class ThreadActivity : u32 {
    Normal = 0,
    Paused = 1,
};

enum class ThreadSchedFlags : u32 {
    ProcessPauseFlag = 1 << 4,
    ThreadPauseFlag = 1 << 5,
    ProcessDebugPauseFlag = 1 << 6,
    KernelInitPauseFlag = 1 << 8,
};

enum class ThreadWaitReasonForDebugging : u32 {
    None,            ///< Thread is not waiting
    Sleep,           ///< Thread is waiting due to a SleepThread SVC
    IPC,             ///< Thread is waiting for the reply from an IPC request
    Synchronization, ///< Thread is waiting due to a WaitSynchronization SVC
    ConditionVar,    ///< Thread is waiting due to a WaitProcessWideKey SVC
    Arbitration,     ///< Thread is waiting due to a SignalToAddress/WaitForAddress SVC
    Suspended,       ///< Thread is waiting due to process suspension
};

class Thread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
    friend class KScheduler;
    friend class Process;

public:
    explicit Thread(KernelCore& kernel);
    ~Thread() override;

    using MutexWaitingThreads = std::vector<std::shared_ptr<Thread>>;

    using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
    using ThreadContext64 = Core::ARM_Interface::ThreadContext64;

    /**
     * Creates and returns a new thread. The new thread is immediately scheduled.
     * @param system The instance of the whole system
     * @param type_flags Bitmask of ThreadType flags describing the kind of thread to create
     * @param name The friendly name desired for the thread
     * @param entry_point The address at which the thread should start execution
     * @param priority The thread's priority
     * @param arg User data to pass to the thread
     * @param processor_id The ID(s) of the processors on which the thread is desired to be run
     * @param stack_top The address of the thread's stack top
     * @param owner_process The parent process for the thread; if null, it's a kernel thread
     * @return A shared pointer to the newly created thread
     */
    static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
                                                     std::string name, VAddr entry_point,
                                                     u32 priority, u64 arg, s32 processor_id,
                                                     VAddr stack_top, Process* owner_process);

    /**
     * Creates and returns a new thread. The new thread is immediately scheduled.
     * @param system The instance of the whole system
     * @param type_flags Bitmask of ThreadType flags describing the kind of thread to create
     * @param name The friendly name desired for the thread
     * @param entry_point The address at which the thread should start execution
     * @param priority The thread's priority
     * @param arg User data to pass to the thread
     * @param processor_id The ID(s) of the processors on which the thread is desired to be run
     * @param stack_top The address of the thread's stack top
     * @param owner_process The parent process for the thread; if null, it's a kernel thread
     * @param thread_start_func The function where the host context will start.
     * @param thread_start_parameter The parameter which will be passed to the host context on init
     * @return A shared pointer to the newly created thread
     */
    static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
                                                     std::string name, VAddr entry_point,
                                                     u32 priority, u64 arg, s32 processor_id,
                                                     VAddr stack_top, Process* owner_process,
                                                     std::function<void(void*)>&& thread_start_func,
                                                     void* thread_start_parameter);
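
    // Illustrative call (hypothetical values): a userland thread would be
    // created roughly as
    //   Thread::Create(system, THREADTYPE_USER, "guest.main", entry_point,
    //                  THREADPRIO_DEFAULT, 0, THREADPROCESSORID_IDEAL, stack_top,
    //                  &owner_process);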

    std::string GetName() const override {
        return name;
    }

    void SetName(std::string new_name) {
        name = std::move(new_name);
    }

    std::string GetTypeName() const override {
        return "Thread";
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /**
     * Gets the thread's current priority.
     * @return The current thread's priority.
     */
    [[nodiscard]] s32 GetPriority() const {
        return current_priority;
    }

    /**
     * Sets the thread's current priority.
     * @param priority The new priority.
     */
    void SetPriority(s32 priority) {
        current_priority = priority;
    }

    /**
     * Gets the thread's nominal priority.
     * @return The current thread's nominal priority.
     */
    [[nodiscard]] s32 GetBasePriority() const {
        return base_priority;
    }

    /**
     * Sets the thread's nominal priority.
     * @param priority The new priority.
     */
    void SetBasePriority(u32 priority);

    /// Changes the core that the thread is running or scheduled to run on.
    [[nodiscard]] ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);

    /**
     * Gets the thread's thread ID.
     * @return The thread's ID.
     */
    [[nodiscard]] u64 GetThreadID() const {
        return thread_id;
    }

    /// Resumes a thread from waiting
    void Wakeup();

    ResultCode Start();

    bool IsSignaled() const override;

    /// Cancels a waiting operation that this thread may or may not be within.
    ///
    /// When the thread is within a waiting state, this will set the thread's
    /// waiting result to signal a canceled wait. The function will then resume
    /// this thread.
    ///
    void CancelWait();

    void SetSynchronizationResults(KSynchronizationObject* object, ResultCode result);

    void SetSyncedObject(KSynchronizationObject* object, ResultCode result) {
        SetSynchronizationResults(object, result);
    }

    ResultCode GetWaitResult(KSynchronizationObject** out) const {
        *out = signaling_object;
        return signaling_result;
    }

    ResultCode GetSignalingResult() const {
        return signaling_result;
    }

    /// Stops a thread, invalidating it from further use.
    void Stop();

    /**
     * Returns the Thread Local Storage address of the current thread
     * @returns VAddr of the thread's TLS
     */
    VAddr GetTLSAddress() const {
        return tls_address;
    }

    /**
     * Returns the value of the TPIDR_EL0 Read/Write system register for this thread.
     * @returns The value of the TPIDR_EL0 register.
     */
    u64 GetTPIDR_EL0() const {
        return tpidr_el0;
    }

    /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
    void SetTPIDR_EL0(u64 value) {
        tpidr_el0 = value;
    }

    /**
     * Returns the address of the current thread's command buffer, located in the TLS.
     * @returns VAddr of the thread's command buffer.
     */
    VAddr GetCommandBufferAddress() const;

    ThreadContext32& GetContext32() {
        return context_32;
    }

    const ThreadContext32& GetContext32() const {
        return context_32;
    }

    ThreadContext64& GetContext64() {
        return context_64;
    }

    const ThreadContext64& GetContext64() const {
        return context_64;
    }

    bool IsHLEThread() const {
        return (type & THREADTYPE_HLE) != 0;
    }

    bool IsSuspendThread() const {
        return (type & THREADTYPE_SUSPEND) != 0;
    }

    bool IsIdleThread() const {
        return (type & THREADTYPE_IDLE) != 0;
    }

    bool WasRunning() const {
        return was_running;
    }

    void SetWasRunning(bool value) {
        was_running = value;
    }

    std::shared_ptr<Common::Fiber>& GetHostContext();

    ThreadState GetState() const {
        return thread_state & ThreadState::Mask;
    }

    ThreadState GetRawState() const {
        return thread_state;
    }

    void SetState(ThreadState state);

    s64 GetLastScheduledTick() const {
        return last_scheduled_tick;
    }

    void SetLastScheduledTick(s64 tick) {
        last_scheduled_tick = tick;
    }

    u64 GetTotalCPUTimeTicks() const {
        return total_cpu_time_ticks;
    }

    void UpdateCPUTimeTicks(u64 ticks) {
        total_cpu_time_ticks += ticks;
    }

    s32 GetProcessorID() const {
        return processor_id;
    }

    s32 GetActiveCore() const {
        return GetProcessorID();
    }

    void SetProcessorID(s32 new_core) {
        processor_id = new_core;
    }

    void SetActiveCore(s32 new_core) {
        processor_id = new_core;
    }

    Process* GetOwnerProcess() {
        return owner_process;
    }

    const Process* GetOwnerProcess() const {
        return owner_process;
    }

    const MutexWaitingThreads& GetMutexWaitingThreads() const {
        return wait_mutex_threads;
    }

    Thread* GetLockOwner() const {
        return lock_owner;
    }

    void SetLockOwner(Thread* owner) {
        lock_owner = owner;
    }

    u32 GetIdealCore() const {
        return ideal_core;
    }

    const KAffinityMask& GetAffinityMask() const {
        return affinity_mask;
    }

    ResultCode SetActivity(ThreadActivity value);

    /// Sleeps this thread for the given amount of nanoseconds.
    ResultCode Sleep(s64 nanoseconds);

    s64 GetYieldScheduleCount() const {
        return schedule_count;
    }

    void SetYieldScheduleCount(s64 count) {
        schedule_count = count;
    }

    bool IsRunning() const {
        return is_running;
    }

    void SetIsRunning(bool value) {
        is_running = value;
    }

    bool IsWaitCancelled() const {
        return is_sync_cancelled;
    }

    void ClearWaitCancelled() {
        is_sync_cancelled = false;
    }

    Handle GetGlobalHandle() const {
        return global_handle;
    }

    bool IsCancellable() const {
        return is_cancellable;
    }

    void SetCancellable() {
        is_cancellable = true;
    }

    void ClearCancellable() {
        is_cancellable = false;
    }

    bool IsTerminationRequested() const {
        return will_be_terminated || GetRawState() == ThreadState::Terminated;
    }

    bool IsPaused() const {
        return pausing_state != 0;
    }

    bool IsContinuousOnSVC() const {
        return is_continuous_on_svc;
    }

    void SetContinuousOnSVC(bool is_continuous) {
        is_continuous_on_svc = is_continuous;
    }

    bool IsPhantomMode() const {
        return is_phantom_mode;
    }

    void SetPhantomMode(bool phantom) {
        is_phantom_mode = phantom;
    }

    bool HasExited() const {
        return has_exited;
    }
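
    // Intrusive node for the scheduler's per-core priority queues: each thread
    // embeds one QueueEntry per CPU core (see per_core_priority_queue_entry
    // below), so queue membership requires no allocation.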
    class QueueEntry {
    public:
        constexpr QueueEntry() = default;

        constexpr void Initialize() {
            prev = nullptr;
            next = nullptr;
        }

        constexpr Thread* GetPrev() const {
            return prev;
        }
        constexpr Thread* GetNext() const {
            return next;
        }
        constexpr void SetPrev(Thread* thread) {
            prev = thread;
        }
        constexpr void SetNext(Thread* thread) {
            next = thread;
        }

    private:
        Thread* prev{};
        Thread* next{};
    };

    QueueEntry& GetPriorityQueueEntry(s32 core) {
        return per_core_priority_queue_entry[core];
    }

    const QueueEntry& GetPriorityQueueEntry(s32 core) const {
        return per_core_priority_queue_entry[core];
    }

    s32 GetDisableDispatchCount() const {
        return disable_count;
    }

    void DisableDispatch() {
        ASSERT(GetDisableDispatchCount() >= 0);
        disable_count++;
    }

    void EnableDispatch() {
        ASSERT(GetDisableDispatchCount() > 0);
        disable_count--;
    }

    void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
        wait_reason_for_debugging = reason;
    }

    [[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
        return wait_reason_for_debugging;
    }

    void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
        wait_objects_for_debugging.clear();
        wait_objects_for_debugging.reserve(objects.size());
        for (const auto& object : objects) {
            wait_objects_for_debugging.emplace_back(object);
        }
    }

    [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
        return wait_objects_for_debugging;
    }

    void SetMutexWaitAddressForDebugging(VAddr address) {
        mutex_wait_address_for_debugging = address;
    }

    [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const {
        return mutex_wait_address_for_debugging;
    }

    void AddWaiter(Thread* thread);

    void RemoveWaiter(Thread* thread);

    [[nodiscard]] Thread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);

    [[nodiscard]] VAddr GetAddressKey() const {
        return address_key;
    }

    [[nodiscard]] u32 GetAddressKeyValue() const {
        return address_key_value;
    }

    void SetAddressKey(VAddr key) {
        address_key = key;
    }

    void SetAddressKey(VAddr key, u32 val) {
        address_key = key;
        address_key_value = val;
    }

private:
    static constexpr size_t PriorityInheritanceCountMax = 10;
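
    // Views the same storage as either synchronization-object pointers or raw
    // handles; the static_assert below guarantees the two views are the same
    // size.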
    union SyncObjectBuffer {
        std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
        std::array<Handle,
                   Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))>
            handles;
        constexpr SyncObjectBuffer() {}
    };
    static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
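
    // Orders threads in a condition-variable tree first by key, then by
    // priority.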
    struct ConditionVariableComparator {
        struct LightCompareType {
            u64 cv_key{};
            s32 priority{};

            [[nodiscard]] constexpr u64 GetConditionVariableKey() const {
                return cv_key;
            }

            [[nodiscard]] constexpr s32 GetPriority() const {
                return priority;
            }
        };

        template <typename T>
        requires(std::same_as<T, Thread> || std::same_as<T, LightCompareType>)
        static constexpr int Compare(const T& lhs, const Thread& rhs) {
            const uintptr_t l_key = lhs.GetConditionVariableKey();
            const uintptr_t r_key = rhs.GetConditionVariableKey();

            if (l_key < r_key) {
                // Sort first by key
                return -1;
            } else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) {
                // And then by priority.
                return -1;
            } else {
                return 1;
            }
        }
    };

    Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};

    using ConditionVariableThreadTreeTraits =
        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&Thread::condvar_arbiter_tree_node>;
    using ConditionVariableThreadTree =
        ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;

public:
    using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
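
    // The same tree node and key back both condition-variable and
    // address-arbiter waits, so the arbiter key is simply the
    // condition-variable key.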
    [[nodiscard]] uintptr_t GetConditionVariableKey() const {
        return condvar_key;
    }

    [[nodiscard]] uintptr_t GetAddressArbiterKey() const {
        return condvar_key;
    }

    void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, uintptr_t cv_key,
                              u32 value) {
        condvar_tree = tree;
        condvar_key = cv_key;
        address_key = address;
        address_key_value = value;
    }

    void ClearConditionVariable() {
        condvar_tree = nullptr;
    }

    [[nodiscard]] bool IsWaitingForConditionVariable() const {
        return condvar_tree != nullptr;
    }

    void SetAddressArbiter(ConditionVariableThreadTree* tree, uintptr_t address) {
        condvar_tree = tree;
        condvar_key = address;
    }

    void ClearAddressArbiter() {
        condvar_tree = nullptr;
    }

    [[nodiscard]] bool IsWaitingForAddressArbiter() const {
        return condvar_tree != nullptr;
    }

    [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
        return condvar_tree;
    }

    [[nodiscard]] bool HasWaiters() const {
        return !waiter_list.empty();
    }

private:
    void AddSchedulingFlag(ThreadSchedFlags flag);
    void RemoveSchedulingFlag(ThreadSchedFlags flag);
    void AddWaiterImpl(Thread* thread);
    void RemoveWaiterImpl(Thread* thread);
    static void RestorePriority(KernelCore& kernel, Thread* thread);

    Common::SpinLock context_guard{};
    ThreadContext32 context_32{};
    ThreadContext64 context_64{};
    std::shared_ptr<Common::Fiber> host_context{};

    ThreadState thread_state = ThreadState::Initialized;

    u64 thread_id = 0;

    VAddr entry_point = 0;
    VAddr stack_top = 0;
    std::atomic_int disable_count = 0;

    ThreadType type;

    /// Nominal thread priority, as set by the emulated application.
    /// The nominal priority is the thread priority without priority
    /// inheritance taken into account.
    s32 base_priority{};

    /// Current thread priority. This may change over the course of the
    /// thread's lifetime in order to facilitate priority inheritance.
    s32 current_priority{};

    u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
    s64 schedule_count{};
    s64 last_scheduled_tick{};

    s32 processor_id = 0;

    VAddr tls_address = 0; ///< Virtual address of the Thread Local Storage of the thread
    u64 tpidr_el0 = 0;     ///< TPIDR_EL0 read/write system register.

    /// Process that owns this thread
    Process* owner_process;

    /// Objects that the thread is waiting on, in the same order as they were
    /// passed to WaitSynchronization. This is used for debugging only.
    std::vector<KSynchronizationObject*> wait_objects_for_debugging;

    /// The current mutex wait address. This is used for debugging only.
    VAddr mutex_wait_address_for_debugging{};

    /// The reason the thread is waiting. This is used for debugging only.
    ThreadWaitReasonForDebugging wait_reason_for_debugging{};

    KSynchronizationObject* signaling_object{};
    ResultCode signaling_result{RESULT_SUCCESS};

    /// List of threads that are waiting for a mutex that is held by this thread.
    MutexWaitingThreads wait_mutex_threads;

    /// Thread that owns the lock that this thread is waiting for.
    Thread* lock_owner{};

    /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
    Handle global_handle = 0;

    KScheduler* scheduler = nullptr;

    std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};

    u32 ideal_core{0xFFFFFFFF};
    KAffinityMask affinity_mask{};

    s32 ideal_core_override = -1;
    u32 affinity_override_count = 0;

    u32 pausing_state = 0;
    bool is_running = false;
    bool is_cancellable = false;
    bool is_sync_cancelled = false;

    bool is_continuous_on_svc = false;

    bool will_be_terminated = false;
    bool is_phantom_mode = false;
    bool has_exited = false;

    bool was_running = false;

    bool signaled{};

    ConditionVariableThreadTree* condvar_tree{};
    uintptr_t condvar_key{};
    VAddr address_key{};
    u32 address_key_value{};
    s32 num_kernel_waiters{};

    using WaiterList = boost::intrusive::list<Thread>;
    WaiterList waiter_list{};
    WaiterList pinned_waiter_list{};

    std::string name;
};

} // namespace Kernel
@@ -1,55 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/transfer_memory.h"
#include "core/hle/result.h"
#include "core/memory.h"

namespace Kernel {

TransferMemory::TransferMemory(KernelCore& kernel, Core::Memory::Memory& memory)
    : Object{kernel}, memory{memory} {}

TransferMemory::~TransferMemory() {
    // Release memory region when transfer memory is destroyed
    Reset();
    owner_process->GetResourceLimit()->Release(LimitableResource::TransferMemory, 1);
}

std::shared_ptr<TransferMemory> TransferMemory::Create(KernelCore& kernel,
                                                       Core::Memory::Memory& memory,
                                                       VAddr base_address, std::size_t size,
                                                       KMemoryPermission permissions) {
    std::shared_ptr<TransferMemory> transfer_memory{
        std::make_shared<TransferMemory>(kernel, memory)};

    transfer_memory->base_address = base_address;
    transfer_memory->size = size;
    transfer_memory->owner_permissions = permissions;
    transfer_memory->owner_process = kernel.CurrentProcess();

    return transfer_memory;
}

u8* TransferMemory::GetPointer() {
    return memory.GetPointer(base_address);
}

const u8* TransferMemory::GetPointer() const {
    return memory.GetPointer(base_address);
}
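
// Reserve() marks the region in the owner's page table after creation, and
// Reset() releases it again when the transfer memory is closed (the
// destructor above relies on this).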
ResultCode TransferMemory::Reserve() {
    return owner_process->PageTable().ReserveTransferMemory(base_address, size, owner_permissions);
}

ResultCode TransferMemory::Reset() {
    return owner_process->PageTable().ResetTransferMemory(base_address, size);
}

} // namespace Kernel
@@ -1,96 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>

#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/physical_memory.h"

union ResultCode;

namespace Core::Memory {
class Memory;
}

namespace Kernel {

class KernelCore;
class Process;

/// Defines the interface for transfer memory objects.
///
/// Transfer memory is typically used for the purpose of
/// transferring memory between separate process instances,
/// thus the name.
///
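/// Illustrative lifecycle (hypothetical call site): Create() wraps a
/// guest-supplied address range, Reserve() marks it in the owner's page
/// table, and Reset() returns it when the handle is closed.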
class TransferMemory final : public Object {
public:
    explicit TransferMemory(KernelCore& kernel, Core::Memory::Memory& memory);
    ~TransferMemory() override;

    static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory;

    static std::shared_ptr<TransferMemory> Create(KernelCore& kernel, Core::Memory::Memory& memory,
                                                  VAddr base_address, std::size_t size,
                                                  KMemoryPermission permissions);

    TransferMemory(const TransferMemory&) = delete;
    TransferMemory& operator=(const TransferMemory&) = delete;

    TransferMemory(TransferMemory&&) = delete;
    TransferMemory& operator=(TransferMemory&&) = delete;

    std::string GetTypeName() const override {
        return "TransferMemory";
    }

    std::string GetName() const override {
        return GetTypeName();
    }

    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /// Gets a pointer to the backing block of this instance.
    u8* GetPointer();

    /// Gets a pointer to the backing block of this instance.
    const u8* GetPointer() const;

    /// Gets the size of the memory backing this instance in bytes.
    constexpr std::size_t GetSize() const {
        return size;
    }

    /// Reserves the region to be used for the transfer memory, called after the transfer memory is
    /// created.
    ResultCode Reserve();

    /// Resets the region previously used for the transfer memory, called after the transfer memory
    /// is closed.
    ResultCode Reset();

    void Finalize() override {}

private:
    /// The base address for the memory managed by this instance.
    VAddr base_address{};

    /// Size of the memory, in bytes, that this instance manages.
    std::size_t size{};

    /// The memory permissions that are applied to this instance.
    KMemoryPermission owner_permissions{};

    /// The process that this transfer memory instance was created under.
    Process* owner_process{};

    Core::Memory::Memory& memory;
};

} // namespace Kernel
@@ -1,41 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include "common/assert.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/writable_event.h"

namespace Kernel {

WritableEvent::WritableEvent(KernelCore& kernel) : Object{kernel} {}
WritableEvent::~WritableEvent() = default;
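
// Builds the two halves of an event pair; the shared name is suffixed with
// ":Readable" / ":Writable" so the halves stay distinguishable when debugging.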
EventPair WritableEvent::CreateEventPair(KernelCore& kernel, std::string name) {
    std::shared_ptr<WritableEvent> writable_event(new WritableEvent(kernel));
    std::shared_ptr<ReadableEvent> readable_event(new ReadableEvent(kernel));

    writable_event->name = name + ":Writable";
    writable_event->readable = readable_event;
    readable_event->name = name + ":Readable";

    return {std::move(readable_event), std::move(writable_event)};
}

std::shared_ptr<ReadableEvent> WritableEvent::GetReadableEvent() const {
    return readable;
}

void WritableEvent::Signal() {
    readable->Signal();
}

void WritableEvent::Clear() {
    readable->Clear();
}

} // namespace Kernel
@@ -1,60 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <string>

#include "core/hle/kernel/object.h"

namespace Kernel {

class KernelCore;
class ReadableEvent;
class WritableEvent;

struct EventPair {
    std::shared_ptr<ReadableEvent> readable;
    std::shared_ptr<WritableEvent> writable;
};

class WritableEvent final : public Object {
public:
    ~WritableEvent() override;

    /**
     * Creates an event
     * @param kernel The kernel instance to create this event under.
     * @param name Optional name of event
     */
    static EventPair CreateEventPair(KernelCore& kernel, std::string name = "Unknown");
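
    // Illustrative use (hypothetical names): a service keeps the writable half
    // and signals it when work completes, while a guest thread waits on the
    // readable half:
    //   auto pair = WritableEvent::CreateEventPair(kernel, "IExampleService:Event");
    //   pair.writable->Signal();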

    std::string GetTypeName() const override {
        return "WritableEvent";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::WritableEvent;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    std::shared_ptr<ReadableEvent> GetReadableEvent() const;

    void Signal();
    void Clear();

    void Finalize() override {}

private:
    explicit WritableEvent(KernelCore& kernel);

    std::shared_ptr<ReadableEvent> readable;

    std::string name; ///< Name of event (optional)
};

} // namespace Kernel