early-access version 1255
182
src/core/hle/ipc.h
Executable file
@@ -0,0 +1,182 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>

#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"

namespace IPC {

/// Size of the command buffer area, in 32-bit words.
constexpr std::size_t COMMAND_BUFFER_LENGTH = 0x100 / sizeof(u32);

enum class ControlCommand : u32 {
    ConvertSessionToDomain = 0,
    ConvertDomainToSession = 1,
    DuplicateSession = 2,
    QueryPointerBufferSize = 3,
    DuplicateSessionEx = 4,
    Unspecified,
};

enum class CommandType : u32 {
    Invalid = 0,
    LegacyRequest = 1,
    Close = 2,
    LegacyControl = 3,
    Request = 4,
    Control = 5,
    RequestWithContext = 6,
    ControlWithContext = 7,
    Unspecified,
};

struct CommandHeader {
    union {
        u32_le raw_low;
        BitField<0, 16, CommandType> type;
        BitField<16, 4, u32> num_buf_x_descriptors;
        BitField<20, 4, u32> num_buf_a_descriptors;
        BitField<24, 4, u32> num_buf_b_descriptors;
        BitField<28, 4, u32> num_buf_w_descriptors;
    };

    enum class BufferDescriptorCFlag : u32 {
        Disabled = 0,
        InlineDescriptor = 1,
        OneDescriptor = 2,
    };

    union {
        u32_le raw_high;
        BitField<0, 10, u32> data_size;
        BitField<10, 4, BufferDescriptorCFlag> buf_c_descriptor_flags;
        BitField<31, 1, u32> enable_handle_descriptor;
    };
};
static_assert(sizeof(CommandHeader) == 8, "CommandHeader size is incorrect");
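
// Annotation (not part of the original commit): a minimal sketch of decoding the first
// command-buffer word through this header. The raw value is a made-up example:
//
//     IPC::CommandHeader header{};
//     header.raw_low = 0x00010004; // CommandType::Request with one X buffer descriptor
//     ASSERT(header.type == IPC::CommandType::Request);
//     ASSERT(header.num_buf_x_descriptors == 1);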

union HandleDescriptorHeader {
    u32_le raw_high;
    BitField<0, 1, u32> send_current_pid;
    BitField<1, 4, u32> num_handles_to_copy;
    BitField<5, 4, u32> num_handles_to_move;
};
static_assert(sizeof(HandleDescriptorHeader) == 4, "HandleDescriptorHeader size is incorrect");

struct BufferDescriptorX {
    union {
        BitField<0, 6, u32> counter_bits_0_5;
        BitField<6, 3, u32> address_bits_36_38;
        BitField<9, 3, u32> counter_bits_9_11;
        BitField<12, 4, u32> address_bits_32_35;
        BitField<16, 16, u32> size;
    };

    u32_le address_bits_0_31;

    u32_le Counter() const {
        u32_le counter{counter_bits_0_5};
        counter |= counter_bits_9_11 << 9;
        return counter;
    }

    VAddr Address() const {
        VAddr address{address_bits_0_31};
        address |= static_cast<VAddr>(address_bits_32_35) << 32;
        address |= static_cast<VAddr>(address_bits_36_38) << 36;
        return address;
    }

    u64 Size() const {
        return static_cast<u64>(size);
    }
};
static_assert(sizeof(BufferDescriptorX) == 8, "BufferDescriptorX size is incorrect");
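
// Annotation (not part of the original commit): a worked example of the split address
// encoding above, using the made-up 39-bit address 0x12'3456'7890:
// address_bits_0_31 = 0x34567890, address_bits_32_35 = 0x2, address_bits_36_38 = 0x1.
// Address() reassembles them as 0x34567890 | (0x2 << 32) | (0x1 << 36) == 0x12'3456'7890.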

struct BufferDescriptorABW {
    u32_le size_bits_0_31;
    u32_le address_bits_0_31;

    union {
        BitField<0, 2, u32> flags;
        BitField<2, 3, u32> address_bits_36_38;
        BitField<24, 4, u32> size_bits_32_35;
        BitField<28, 4, u32> address_bits_32_35;
    };

    VAddr Address() const {
        VAddr address{address_bits_0_31};
        address |= static_cast<VAddr>(address_bits_32_35) << 32;
        address |= static_cast<VAddr>(address_bits_36_38) << 36;
        return address;
    }

    u64 Size() const {
        u64 size{size_bits_0_31};
        size |= static_cast<u64>(size_bits_32_35) << 32;
        return size;
    }
};
static_assert(sizeof(BufferDescriptorABW) == 12, "BufferDescriptorABW size is incorrect");

struct BufferDescriptorC {
    u32_le address_bits_0_31;

    union {
        BitField<0, 16, u32> address_bits_32_47;
        BitField<16, 16, u32> size;
    };

    VAddr Address() const {
        VAddr address{address_bits_0_31};
        address |= static_cast<VAddr>(address_bits_32_47) << 32;
        return address;
    }

    u64 Size() const {
        return static_cast<u64>(size);
    }
};
static_assert(sizeof(BufferDescriptorC) == 8, "BufferDescriptorC size is incorrect");

struct DataPayloadHeader {
    u32_le magic;
    INSERT_PADDING_WORDS(1);
};
static_assert(sizeof(DataPayloadHeader) == 8, "DataPayloadHeader size is incorrect");

struct DomainMessageHeader {
    enum class CommandType : u32_le {
        SendMessage = 1,
        CloseVirtualHandle = 2,
    };

    union {
        // Used when responding to an IPC request, Server -> Client.
        struct {
            u32_le num_objects;
            INSERT_UNION_PADDING_WORDS(3);
        };

        // Used when performing an IPC request, Client -> Server.
        struct {
            union {
                BitField<0, 8, CommandType> command;
                BitField<8, 8, u32> input_object_count;
                BitField<16, 16, u32> size;
            };
            u32_le object_id;
            INSERT_UNION_PADDING_WORDS(2);
        };

        std::array<u32, 4> raw{};
    };
};
static_assert(sizeof(DomainMessageHeader) == 16, "DomainMessageHeader size is incorrect");
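
// Annotation (not part of the original commit): a sketch of how the request-side shape of
// this union is read; the header value here is hypothetical:
//
//     IPC::DomainMessageHeader domain_header{}; // filled from the command buffer
//     if (domain_header.command == IPC::DomainMessageHeader::CommandType::SendMessage) {
//         // domain_header.object_id selects which object in the domain receives the message.
//     }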

} // namespace IPC
482
src/core/hle/ipc_helpers.h
Executable file
@@ -0,0 +1,482 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <cstring>
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/ipc.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/result.h"

namespace IPC {

constexpr ResultCode ERR_REMOTE_PROCESS_DEAD{ErrorModule::HIPC, 301};

class RequestHelperBase {
protected:
    Kernel::HLERequestContext* context = nullptr;
    u32* cmdbuf;
    ptrdiff_t index = 0;

public:
    explicit RequestHelperBase(u32* command_buffer) : cmdbuf(command_buffer) {}

    explicit RequestHelperBase(Kernel::HLERequestContext& context)
        : context(&context), cmdbuf(context.CommandBuffer()) {}

    void Skip(u32 size_in_words, bool set_to_null) {
        if (set_to_null) {
            std::memset(cmdbuf + index, 0, size_in_words * sizeof(u32));
        }
        index += static_cast<ptrdiff_t>(size_in_words);
    }

    /**
     * Aligns the current position forward to a 16-byte boundary, padding with zeros.
     */
    void AlignWithPadding() {
        if (index & 3) {
            Skip(static_cast<u32>(4 - (index & 3)), true);
        }
    }

    u32 GetCurrentOffset() const {
        return static_cast<u32>(index);
    }

    void SetCurrentOffset(u32 offset) {
        index = static_cast<ptrdiff_t>(offset);
    }
};

class ResponseBuilder : public RequestHelperBase {
public:
    /// Flags used for customizing the behavior of ResponseBuilder
    enum class Flags : u32 {
        None = 0,
        /// Uses move handles to move objects in the response, even when in a domain. This is
        /// required when PushMoveObjects is used.
        AlwaysMoveHandles = 1,
    };

    explicit ResponseBuilder(Kernel::HLERequestContext& context, u32 normal_params_size,
                             u32 num_handles_to_copy = 0, u32 num_objects_to_move = 0,
                             Flags flags = Flags::None)
        : RequestHelperBase(context), normal_params_size(normal_params_size),
          num_handles_to_copy(num_handles_to_copy),
          num_objects_to_move(num_objects_to_move), kernel{context.kernel} {

        std::memset(cmdbuf, 0, sizeof(u32) * IPC::COMMAND_BUFFER_LENGTH);

        context.ClearIncomingObjects();

        IPC::CommandHeader header{};

        // The entire size of the raw data section in u32 units, including the 16 bytes of
        // mandatory padding.
        u64 raw_data_size = sizeof(IPC::DataPayloadHeader) / 4 + 4 + normal_params_size;

        u32 num_handles_to_move{};
        u32 num_domain_objects{};
        const bool always_move_handles{
            (static_cast<u32>(flags) & static_cast<u32>(Flags::AlwaysMoveHandles)) != 0};
        if (!context.Session()->IsDomain() || always_move_handles) {
            num_handles_to_move = num_objects_to_move;
        } else {
            num_domain_objects = num_objects_to_move;
        }

        if (context.Session()->IsDomain()) {
            raw_data_size += sizeof(DomainMessageHeader) / 4 + num_domain_objects;
        }

        header.data_size.Assign(static_cast<u32>(raw_data_size));
        if (num_handles_to_copy || num_handles_to_move) {
            header.enable_handle_descriptor.Assign(1);
        }
        PushRaw(header);

        if (header.enable_handle_descriptor) {
            IPC::HandleDescriptorHeader handle_descriptor_header{};
            handle_descriptor_header.num_handles_to_copy.Assign(num_handles_to_copy);
            handle_descriptor_header.num_handles_to_move.Assign(num_handles_to_move);
            PushRaw(handle_descriptor_header);
            Skip(num_handles_to_copy + num_handles_to_move, true);
        }

        AlignWithPadding();

        if (context.Session()->IsDomain() && context.HasDomainMessageHeader()) {
            IPC::DomainMessageHeader domain_header{};
            domain_header.num_objects = num_domain_objects;
            PushRaw(domain_header);
        }

        IPC::DataPayloadHeader data_payload_header{};
        data_payload_header.magic = Common::MakeMagic('S', 'F', 'C', 'O');
        PushRaw(data_payload_header);

        datapayload_index = index;
    }

    template <class T>
    void PushIpcInterface(std::shared_ptr<T> iface) {
        if (context->Session()->IsDomain()) {
            context->AddDomainObject(std::move(iface));
        } else {
            auto [client, server] = Kernel::Session::Create(kernel, iface->GetServiceName());
            context->AddMoveObject(std::move(client));
            iface->ClientConnected(std::move(server));
        }
    }

    template <class T, class... Args>
    void PushIpcInterface(Args&&... args) {
        PushIpcInterface<T>(std::make_shared<T>(std::forward<Args>(args)...));
    }

    void ValidateHeader() {
        const std::size_t num_domain_objects = context->NumDomainObjects();
        const std::size_t num_move_objects = context->NumMoveObjects();
        ASSERT_MSG(!num_domain_objects || !num_move_objects,
                   "cannot move normal handles and domain objects");
        ASSERT_MSG((index - datapayload_index) == normal_params_size,
                   "normal_params_size value is incorrect");
        ASSERT_MSG((num_domain_objects + num_move_objects) == num_objects_to_move,
                   "num_objects_to_move value is incorrect");
        ASSERT_MSG(context->NumCopyObjects() == num_handles_to_copy,
                   "num_handles_to_copy value is incorrect");
    }

    // Validate on destruction, as there shouldn't be any case where we don't want it
    ~ResponseBuilder() {
        ValidateHeader();
    }

    void PushImpl(s8 value);
    void PushImpl(s16 value);
    void PushImpl(s32 value);
    void PushImpl(s64 value);
    void PushImpl(u8 value);
    void PushImpl(u16 value);
    void PushImpl(u32 value);
    void PushImpl(u64 value);
    void PushImpl(float value);
    void PushImpl(double value);
    void PushImpl(bool value);
    void PushImpl(ResultCode value);

    template <typename T>
    void Push(T value) {
        return PushImpl(value);
    }

    template <typename First, typename... Other>
    void Push(const First& first_value, const Other&... other_values);

    /**
     * Helper function for pushing strongly-typed enumeration values.
     *
     * @tparam Enum The enumeration type to be pushed
     *
     * @param value The value to push.
     *
     * @note The underlying size of the enumeration type is the size of the
     *       data that gets pushed. e.g. "enum class SomeEnum : u16" will
     *       push a u16-sized amount of data.
     */
    template <typename Enum>
    void PushEnum(Enum value) {
        static_assert(std::is_enum_v<Enum>, "T must be an enum type within a PushEnum call.");
        static_assert(!std::is_convertible_v<Enum, int>,
                      "enum type in PushEnum must be a strongly typed enum.");
        Push(static_cast<std::underlying_type_t<Enum>>(value));
    }

    /**
     * @brief Copies the content of the given trivially copyable class to the buffer as a normal
     * param
     * @note: The input class must be correctly packed/padded to fit hardware layout.
     */
    template <typename T>
    void PushRaw(const T& value);

    template <typename... O>
    void PushMoveObjects(std::shared_ptr<O>... pointers);

    template <typename... O>
    void PushCopyObjects(std::shared_ptr<O>... pointers);

private:
    u32 normal_params_size{};
    u32 num_handles_to_copy{};
    u32 num_objects_to_move{}; ///< Domain objects or move handles, context dependent
    std::ptrdiff_t datapayload_index{};
    Kernel::KernelCore& kernel;
};
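
// Annotation (not part of the original commit): a minimal sketch of how an HLE service
// handler typically uses ResponseBuilder; the handler name and `size` value are hypothetical:
//
//     void GetSize(Kernel::HLERequestContext& ctx) {
//         IPC::ResponseBuilder rb{ctx, 4}; // 4 words: 2-word result code + one u64
//         rb.Push(RESULT_SUCCESS);
//         rb.Push<u64>(size);
//     }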

/// Push ///

inline void ResponseBuilder::PushImpl(s32 value) {
    cmdbuf[index++] = static_cast<u32>(value);
}

inline void ResponseBuilder::PushImpl(u32 value) {
    cmdbuf[index++] = value;
}

template <typename T>
void ResponseBuilder::PushRaw(const T& value) {
    static_assert(std::is_trivially_copyable_v<T>,
                  "It's undefined behavior to use memcpy with non-trivially copyable objects");
    std::memcpy(cmdbuf + index, &value, sizeof(T));
    index += (sizeof(T) + 3) / 4; // round up to word length
}

inline void ResponseBuilder::PushImpl(ResultCode value) {
    // Result codes occupy 64 bits in the IPC buffer, but only the low u32 carries the code;
    // the high word is pushed as zero.
    Push(value.raw);
    Push<u32>(0);
}

inline void ResponseBuilder::PushImpl(s8 value) {
    PushRaw(value);
}

inline void ResponseBuilder::PushImpl(s16 value) {
    PushRaw(value);
}

inline void ResponseBuilder::PushImpl(s64 value) {
    PushImpl(static_cast<u32>(value));
    PushImpl(static_cast<u32>(value >> 32));
}

inline void ResponseBuilder::PushImpl(u8 value) {
    PushRaw(value);
}

inline void ResponseBuilder::PushImpl(u16 value) {
    PushRaw(value);
}

inline void ResponseBuilder::PushImpl(u64 value) {
    PushImpl(static_cast<u32>(value));
    PushImpl(static_cast<u32>(value >> 32));
}

inline void ResponseBuilder::PushImpl(float value) {
    u32 integral;
    std::memcpy(&integral, &value, sizeof(u32));
    PushImpl(integral);
}

inline void ResponseBuilder::PushImpl(double value) {
    u64 integral;
    std::memcpy(&integral, &value, sizeof(u64));
    PushImpl(integral);
}

inline void ResponseBuilder::PushImpl(bool value) {
    PushImpl(static_cast<u8>(value));
}

template <typename First, typename... Other>
void ResponseBuilder::Push(const First& first_value, const Other&... other_values) {
    Push(first_value);
    Push(other_values...);
}

template <typename... O>
inline void ResponseBuilder::PushCopyObjects(std::shared_ptr<O>... pointers) {
    auto objects = {pointers...};
    for (auto& object : objects) {
        context->AddCopyObject(std::move(object));
    }
}

template <typename... O>
inline void ResponseBuilder::PushMoveObjects(std::shared_ptr<O>... pointers) {
    auto objects = {pointers...};
    for (auto& object : objects) {
        context->AddMoveObject(std::move(object));
    }
}

class RequestParser : public RequestHelperBase {
public:
    explicit RequestParser(u32* command_buffer) : RequestHelperBase(command_buffer) {}

    explicit RequestParser(Kernel::HLERequestContext& context) : RequestHelperBase(context) {
        ASSERT_MSG(context.GetDataPayloadOffset(), "context is incomplete");
        Skip(context.GetDataPayloadOffset(), false);
        // Skip the u64 command id, it's already stored in the context
        static constexpr u32 CommandIdSize = 2;
        Skip(CommandIdSize, false);
    }

    template <typename T>
    T Pop();

    template <typename T>
    void Pop(T& value);

    template <typename First, typename... Other>
    void Pop(First& first_value, Other&... other_values);

    template <typename T>
    T PopEnum() {
        static_assert(std::is_enum_v<T>, "T must be an enum type within a PopEnum call.");
        static_assert(!std::is_convertible_v<T, int>,
                      "enum type in PopEnum must be a strongly typed enum.");
        return static_cast<T>(Pop<std::underlying_type_t<T>>());
    }

    /**
     * @brief Reads the next normal parameters as a struct, by copying it
     * @note: The output class must be correctly packed/padded to fit hardware layout.
     */
    template <typename T>
    void PopRaw(T& value);

    /**
     * @brief Reads the next normal parameters as a struct, by copying it into a new value
     * @note: The output class must be correctly packed/padded to fit hardware layout.
     */
    template <typename T>
    T PopRaw();

    template <typename T>
    std::shared_ptr<T> GetMoveObject(std::size_t index);

    template <typename T>
    std::shared_ptr<T> GetCopyObject(std::size_t index);

    template <class T>
    std::shared_ptr<T> PopIpcInterface() {
        ASSERT(context->Session()->IsDomain());
        ASSERT(context->GetDomainMessageHeader().input_object_count > 0);
        return context->GetDomainRequestHandler<T>(Pop<u32>() - 1);
    }
};
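
// Annotation (not part of the original commit): the matching request side as a sketch;
// the handler name is hypothetical:
//
//     void SetSize(Kernel::HLERequestContext& ctx) {
//         IPC::RequestParser rp{ctx};
//         const u64 size = rp.Pop<u64>();
//         // ... act on the popped parameters, then respond ...
//         IPC::ResponseBuilder rb{ctx, 2}; // 2 words: just the result code
//         rb.Push(RESULT_SUCCESS);
//     }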

/// Pop ///

template <>
inline u32 RequestParser::Pop() {
    return cmdbuf[index++];
}

template <>
inline s32 RequestParser::Pop() {
    return static_cast<s32>(Pop<u32>());
}

template <typename T>
void RequestParser::PopRaw(T& value) {
    static_assert(std::is_trivially_copyable_v<T>,
                  "It's undefined behavior to use memcpy with non-trivially copyable objects");
    std::memcpy(&value, cmdbuf + index, sizeof(T));
    index += (sizeof(T) + 3) / 4; // round up to word length
}

template <typename T>
T RequestParser::PopRaw() {
    T value;
    PopRaw(value);
    return value;
}

template <>
inline u8 RequestParser::Pop() {
    return PopRaw<u8>();
}

template <>
inline u16 RequestParser::Pop() {
    return PopRaw<u16>();
}

template <>
inline u64 RequestParser::Pop() {
    const u64 lsw = Pop<u32>();
    const u64 msw = Pop<u32>();
    return msw << 32 | lsw;
}

template <>
inline s8 RequestParser::Pop() {
    return static_cast<s8>(Pop<u8>());
}

template <>
inline s16 RequestParser::Pop() {
    return static_cast<s16>(Pop<u16>());
}

template <>
inline s64 RequestParser::Pop() {
    return static_cast<s64>(Pop<u64>());
}

template <>
inline float RequestParser::Pop() {
    const u32 value = Pop<u32>();
    float real;
    std::memcpy(&real, &value, sizeof(real));
    return real;
}

template <>
inline double RequestParser::Pop() {
    const u64 value = Pop<u64>();
    double real;
    std::memcpy(&real, &value, sizeof(real));
    return real;
}

template <>
inline bool RequestParser::Pop() {
    return Pop<u8>() != 0;
}

template <>
inline ResultCode RequestParser::Pop() {
    return ResultCode{Pop<u32>()};
}

template <typename T>
void RequestParser::Pop(T& value) {
    value = Pop<T>();
}

template <typename First, typename... Other>
void RequestParser::Pop(First& first_value, Other&... other_values) {
    first_value = Pop<First>();
    Pop(other_values...);
}

template <typename T>
std::shared_ptr<T> RequestParser::GetMoveObject(std::size_t index) {
    return context->GetMoveObject<T>(index);
}

template <typename T>
std::shared_ptr<T> RequestParser::GetCopyObject(std::size_t index) {
    return context->GetCopyObject<T>(index);
}

} // namespace IPC
317
src/core/hle/kernel/address_arbiter.cpp
Executable file
@@ -0,0 +1,317 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <vector>

#include "common/assert.h"
#include "common/common_types.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"

namespace Kernel {

// Wake up num_to_wake (or all) threads in a vector.
void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
                                 s32 num_to_wake) {
    // Only process up to num_to_wake threads, unless num_to_wake is <= 0, in which case
    // process them all.
    std::size_t last = waiting_threads.size();
    if (num_to_wake > 0) {
        last = std::min(last, static_cast<std::size_t>(num_to_wake));
    }

    // Signal the waiting threads.
    for (std::size_t i = 0; i < last; i++) {
        waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
        RemoveThread(waiting_threads[i]);
        waiting_threads[i]->WaitForArbitration(false);
        waiting_threads[i]->ResumeFromWait();
    }
}

AddressArbiter::AddressArbiter(Core::System& system) : system{system} {}
AddressArbiter::~AddressArbiter() = default;

ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value,
                                           s32 num_to_wake) {
    switch (type) {
    case SignalType::Signal:
        return SignalToAddressOnly(address, num_to_wake);
    case SignalType::IncrementAndSignalIfEqual:
        return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
    case SignalType::ModifyByWaitingCountAndSignalIfEqual:
        return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake);
    default:
        return ERR_INVALID_ENUM_VALUE;
    }
}

ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
    KScopedSchedulerLock lock(system.Kernel());
    const std::vector<std::shared_ptr<Thread>> waiting_threads =
        GetThreadsWaitingOnAddress(address);
    WakeThreads(waiting_threads, num_to_wake);
    return RESULT_SUCCESS;
}

ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                              s32 num_to_wake) {
    KScopedSchedulerLock lock(system.Kernel());
    auto& memory = system.Memory();

    // Ensure that we can write to the address.
    if (!memory.IsValidVirtualAddress(address)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    const std::size_t current_core = system.CurrentCoreIndex();
    auto& monitor = system.Monitor();
    u32 current_value;
    do {
        current_value = monitor.ExclusiveRead32(current_core, address);

        if (current_value != static_cast<u32>(value)) {
            return ERR_INVALID_STATE;
        }
        current_value++;
    } while (!monitor.ExclusiveWrite32(current_core, address, current_value));

    return SignalToAddressOnly(address, num_to_wake);
}

ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                                         s32 num_to_wake) {
    KScopedSchedulerLock lock(system.Kernel());
    auto& memory = system.Memory();

    // Ensure that we can write to the address.
    if (!memory.IsValidVirtualAddress(address)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    // Get threads waiting on the address.
    const std::vector<std::shared_ptr<Thread>> waiting_threads =
        GetThreadsWaitingOnAddress(address);

    const std::size_t current_core = system.CurrentCoreIndex();
    auto& monitor = system.Monitor();
    s32 updated_value;
    do {
        updated_value = monitor.ExclusiveRead32(current_core, address);

        if (updated_value != value) {
            return ERR_INVALID_STATE;
        }
        // Determine the modified value depending on the waiting count.
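        // The new value is value + 1 when no threads are waiting, value - 1 when every
        // waiting thread will be woken by this call, and value (unchanged) otherwise.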
        if (num_to_wake <= 0) {
            if (waiting_threads.empty()) {
                updated_value = value + 1;
            } else {
                updated_value = value - 1;
            }
        } else {
            if (waiting_threads.empty()) {
                updated_value = value + 1;
            } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
                updated_value = value - 1;
            } else {
                updated_value = value;
            }
        }
    } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));

    WakeThreads(waiting_threads, num_to_wake);
    return RESULT_SUCCESS;
}

ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value,
                                          s64 timeout_ns) {
    switch (type) {
    case ArbitrationType::WaitIfLessThan:
        return WaitForAddressIfLessThan(address, value, timeout_ns, false);
    case ArbitrationType::DecrementAndWaitIfLessThan:
        return WaitForAddressIfLessThan(address, value, timeout_ns, true);
    case ArbitrationType::WaitIfEqual:
        return WaitForAddressIfEqual(address, value, timeout_ns);
    default:
        return ERR_INVALID_ENUM_VALUE;
    }
}

ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
                                                    bool should_decrement) {
    auto& memory = system.Memory();
    auto& kernel = system.Kernel();
    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();

    Handle event_handle = InvalidHandle;
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);

        if (current_thread->IsPendingTermination()) {
            lock.CancelSleep();
            return ERR_THREAD_TERMINATING;
        }

        // Ensure that we can read the address.
        if (!memory.IsValidVirtualAddress(address)) {
            lock.CancelSleep();
            return ERR_INVALID_ADDRESS_STATE;
        }

        s32 current_value = static_cast<s32>(memory.Read32(address));
        if (current_value >= value) {
            lock.CancelSleep();
            return ERR_INVALID_STATE;
        }

        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);

        s32 decrement_value;

        const std::size_t current_core = system.CurrentCoreIndex();
        auto& monitor = system.Monitor();
        do {
            current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
            if (should_decrement) {
                decrement_value = current_value - 1;
            } else {
                decrement_value = current_value;
            }
        } while (
            !monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));

        // Short-circuit without rescheduling, if timeout is zero.
        if (timeout == 0) {
            lock.CancelSleep();
            return RESULT_TIMEOUT;
        }

        current_thread->SetArbiterWaitAddress(address);
        InsertThread(SharedFrom(current_thread));
        current_thread->SetStatus(ThreadStatus::WaitArb);
        current_thread->WaitForArbitration(true);
    }

    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }

    {
        KScopedSchedulerLock lock(kernel);
        if (current_thread->IsWaitingForArbitration()) {
            RemoveThread(SharedFrom(current_thread));
            current_thread->WaitForArbitration(false);
        }
    }

    return current_thread->GetSignalingResult();
}

ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
    auto& memory = system.Memory();
    auto& kernel = system.Kernel();
    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();

    Handle event_handle = InvalidHandle;
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);

        if (current_thread->IsPendingTermination()) {
            lock.CancelSleep();
            return ERR_THREAD_TERMINATING;
        }

        // Ensure that we can read the address.
        if (!memory.IsValidVirtualAddress(address)) {
            lock.CancelSleep();
            return ERR_INVALID_ADDRESS_STATE;
        }

        s32 current_value = static_cast<s32>(memory.Read32(address));
        if (current_value != value) {
            lock.CancelSleep();
            return ERR_INVALID_STATE;
        }

        // Short-circuit without rescheduling, if timeout is zero.
        if (timeout == 0) {
            lock.CancelSleep();
            return RESULT_TIMEOUT;
        }

        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
        current_thread->SetArbiterWaitAddress(address);
        InsertThread(SharedFrom(current_thread));
        current_thread->SetStatus(ThreadStatus::WaitArb);
        current_thread->WaitForArbitration(true);
    }

    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }

    {
        KScopedSchedulerLock lock(kernel);
        if (current_thread->IsWaitingForArbitration()) {
            RemoveThread(SharedFrom(current_thread));
            current_thread->WaitForArbitration(false);
        }
    }

    return current_thread->GetSignalingResult();
}

void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
    const VAddr arb_addr = thread->GetArbiterWaitAddress();
    std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];

    const auto iter =
        std::find_if(thread_list.cbegin(), thread_list.cend(), [&thread](const auto& entry) {
            return entry->GetPriority() >= thread->GetPriority();
        });

    if (iter == thread_list.cend()) {
        thread_list.push_back(std::move(thread));
    } else {
        thread_list.insert(iter, std::move(thread));
    }
}

void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
    const VAddr arb_addr = thread->GetArbiterWaitAddress();
    std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];

    const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
                                   [&thread](const auto& entry) { return thread == entry; });

    if (iter != thread_list.cend()) {
        thread_list.erase(iter);
    }
}

std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
    VAddr address) const {
    const auto iter = arb_threads.find(address);
    if (iter == arb_threads.cend()) {
        return {};
    }

    const std::list<std::shared_ptr<Thread>>& thread_list = iter->second;
    return {thread_list.cbegin(), thread_list.cend()};
}
} // namespace Kernel
91
src/core/hle/kernel/address_arbiter.h
Executable file
@@ -0,0 +1,91 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <list>
#include <memory>
#include <unordered_map>
#include <vector>

#include "common/common_types.h"

union ResultCode;

namespace Core {
class System;
}

namespace Kernel {

class Thread;

class AddressArbiter {
public:
    enum class ArbitrationType {
        WaitIfLessThan = 0,
        DecrementAndWaitIfLessThan = 1,
        WaitIfEqual = 2,
    };

    enum class SignalType {
        Signal = 0,
        IncrementAndSignalIfEqual = 1,
        ModifyByWaitingCountAndSignalIfEqual = 2,
    };

    explicit AddressArbiter(Core::System& system);
    ~AddressArbiter();

    AddressArbiter(const AddressArbiter&) = delete;
    AddressArbiter& operator=(const AddressArbiter&) = delete;

    AddressArbiter(AddressArbiter&&) = default;
    AddressArbiter& operator=(AddressArbiter&&) = delete;

    /// Signals an address being waited on with a particular signaling type.
    ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake);

    /// Waits on an address with a particular arbitration type.
    ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);

private:
    /// Signals an address being waited on.
    ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);

    /// Signals an address being waited on and increments its value if equal to the value argument.
    ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);

    /// Signals an address being waited on and modifies its value based on waiting thread count if
    /// equal to the value argument.
    ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                             s32 num_to_wake);

    /// Waits on an address if the value passed is less than the argument value,
    /// optionally decrementing.
    ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
                                        bool should_decrement);

    /// Waits on an address if the value passed is equal to the argument value.
    ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);

    /// Wakes up num_to_wake (or all) threads in a vector.
    void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);

    /// Inserts a thread into the address arbiter container.
    void InsertThread(std::shared_ptr<Thread> thread);

    /// Removes a thread from the address arbiter container.
    void RemoveThread(std::shared_ptr<Thread> thread);

    /// Gets the threads waiting on an address.
    std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;

    /// Lists of threads waiting on the address arbiter, keyed by waited-on address.
    std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads;

    Core::System& system;
};
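
// Annotation (not part of the original commit): a sketch of how the address arbiter is
// typically reached from supervisor-call handlers. `system` and `addr` are hypothetical,
// and it is assumed the current process exposes its arbiter via a GetAddressArbiter()
// accessor:
//
//     auto& arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
//     arbiter.WaitForAddress(addr, AddressArbiter::ArbitrationType::WaitIfEqual, 0, -1);
//     arbiter.SignalToAddress(addr, AddressArbiter::SignalType::Signal, 0, 1);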

} // namespace Kernel
50
src/core/hle/kernel/client_port.cpp
Executable file
@@ -0,0 +1,50 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/session.h"

namespace Kernel {

ClientPort::ClientPort(KernelCore& kernel) : Object{kernel} {}
ClientPort::~ClientPort() = default;

std::shared_ptr<ServerPort> ClientPort::GetServerPort() const {
    return server_port;
}

ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
    if (active_sessions >= max_sessions) {
        return ERR_MAX_CONNECTIONS_REACHED;
    }
    active_sessions++;

    auto [client, server] = Kernel::Session::Create(kernel, name);

    if (server_port->HasHLEHandler()) {
        server_port->GetHLEHandler()->ClientConnected(std::move(server));
    } else {
        server_port->AppendPendingSession(std::move(server));
    }

    // Wake the threads waiting on the ServerPort
    server_port->Signal();

    return MakeResult(std::move(client));
}

void ClientPort::ConnectionClosed() {
    if (active_sessions == 0) {
        return;
    }

    --active_sessions;
}

} // namespace Kernel
61
src/core/hle/kernel/client_port.h
Executable file
@@ -0,0 +1,61 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <string>

#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"

namespace Kernel {

class ClientSession;
class KernelCore;
class ServerPort;

class ClientPort final : public Object {
public:
    explicit ClientPort(KernelCore& kernel);
    ~ClientPort() override;

    friend class ServerPort;

    std::string GetTypeName() const override {
        return "ClientPort";
    }

    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ClientPort;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    std::shared_ptr<ServerPort> GetServerPort() const;

    /**
     * Creates a new Session pair, adds the created ServerSession to the associated ServerPort's
     * list of pending sessions, and signals the ServerPort, causing any threads
     * waiting on it to awake.
     * @returns ClientSession The client endpoint of the created Session pair, or an error code.
     */
    ResultVal<std::shared_ptr<ClientSession>> Connect();

    /**
     * Signifies that a previously active connection has been closed,
     * decreasing the total number of active connections to this port.
     */
    void ConnectionClosed();

private:
    std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port.
    u32 max_sessions = 0;    ///< Maximum number of simultaneous sessions the port can have
    u32 active_sessions = 0; ///< Number of currently open sessions to this port
    std::string name;        ///< Name of client port (optional)
};

} // namespace Kernel
62
src/core/hle/kernel/client_session.cpp
Executable file
@@ -0,0 +1,62 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"

namespace Kernel {

ClientSession::ClientSession(KernelCore& kernel) : SynchronizationObject{kernel} {}

ClientSession::~ClientSession() {
    // This destructor will be called automatically when the last ClientSession handle is closed by
    // the emulated application.
    if (parent->Server()) {
        parent->Server()->ClientDisconnected();
    }
}

bool ClientSession::ShouldWait(const Thread* thread) const {
    UNIMPLEMENTED();
    return {};
}

void ClientSession::Acquire(Thread* thread) {
    UNIMPLEMENTED();
}

bool ClientSession::IsSignaled() const {
    UNIMPLEMENTED();
    return true;
}

ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kernel,
                                                                std::shared_ptr<Session> parent,
                                                                std::string name) {
    std::shared_ptr<ClientSession> client_session{std::make_shared<ClientSession>(kernel)};

    client_session->name = std::move(name);
    client_session->parent = std::move(parent);

    return MakeResult(std::move(client_session));
}

ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread,
                                          Core::Memory::Memory& memory,
                                          Core::Timing::CoreTiming& core_timing) {
    // Keep ServerSession alive until we're done working with it.
    if (!parent->Server()) {
        return ERR_SESSION_CLOSED_BY_REMOTE;
    }

    // Signal the server session that new data is available
    return parent->Server()->HandleSyncRequest(std::move(thread), memory, core_timing);
}

} // namespace Kernel
70
src/core/hle/kernel/client_session.h
Executable file
@@ -0,0 +1,70 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <string>

#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"

union ResultCode;

namespace Core::Memory {
class Memory;
}

namespace Core::Timing {
class CoreTiming;
}

namespace Kernel {

class KernelCore;
class Session;
class Thread;

class ClientSession final : public SynchronizationObject {
public:
    explicit ClientSession(KernelCore& kernel);
    ~ClientSession() override;

    friend class Session;

    std::string GetTypeName() const override {
        return "ClientSession";
    }

    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ClientSession;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
                               Core::Timing::CoreTiming& core_timing);

    bool ShouldWait(const Thread* thread) const override;

    void Acquire(Thread* thread) override;

    bool IsSignaled() const override;

private:
    static ResultVal<std::shared_ptr<ClientSession>> Create(KernelCore& kernel,
                                                            std::shared_ptr<Session> parent,
                                                            std::string name = "Unknown");

    /// The parent session, which links to the server endpoint.
    std::shared_ptr<Session> parent;

    /// Name of the client session (optional)
    std::string name;
};

} // namespace Kernel
12
src/core/hle/kernel/code_set.cpp
Executable file
@@ -0,0 +1,12 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/code_set.h"

namespace Kernel {

CodeSet::CodeSet() = default;
CodeSet::~CodeSet() = default;

} // namespace Kernel
90
src/core/hle/kernel/code_set.h
Executable file
@@ -0,0 +1,90 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <cstddef>
#include <vector>

#include "common/common_types.h"
#include "core/hle/kernel/physical_memory.h"

namespace Kernel {

/**
 * Represents executable data that may be loaded into a kernel process.
 *
 * A code set consists of three basic segments:
 *   - A code (AKA text) segment,
 *   - A read-only data segment (rodata)
 *   - A data segment
 *
 * The code segment is the portion of the object file that contains
 * executable instructions.
 *
 * The read-only data segment is the portion of the object file that
 * contains (as one would expect) read-only data, such as fixed constant
 * values and data structures.
 *
 * The data segment is similar to the read-only data segment -- it contains
 * variables and data structures that have predefined values; however,
 * entities within this segment can be modified.
 */
struct CodeSet final {
    /// A single segment within a code set.
    struct Segment final {
        /// The byte offset that this segment is located at.
        std::size_t offset = 0;

        /// The address to map this segment to.
        VAddr addr = 0;

        /// The size of this segment in bytes.
        u32 size = 0;
    };

    explicit CodeSet();
    ~CodeSet();

    CodeSet(const CodeSet&) = delete;
    CodeSet& operator=(const CodeSet&) = delete;

    CodeSet(CodeSet&&) = default;
    CodeSet& operator=(CodeSet&&) = default;

    Segment& CodeSegment() {
        return segments[0];
    }

    const Segment& CodeSegment() const {
        return segments[0];
    }

    Segment& RODataSegment() {
        return segments[1];
    }

    const Segment& RODataSegment() const {
        return segments[1];
    }

    Segment& DataSegment() {
        return segments[2];
    }

    const Segment& DataSegment() const {
        return segments[2];
    }

    /// The overall data that backs this code set.
    Kernel::PhysicalMemory memory;

    /// The segments that comprise this code set.
    std::array<Segment, 3> segments;

    /// The entry point address for this code set.
    VAddr entrypoint = 0;
};
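
// Annotation (not part of the original commit): a sketch of how a loader might populate a
// CodeSet; `program_image`, `base`, and the offsets/sizes are hypothetical:
//
//     CodeSet code_set;
//     code_set.memory = std::move(program_image);
//     code_set.CodeSegment() = {0x0, base, 0x1000};              // offset, addr, size
//     code_set.RODataSegment() = {0x1000, base + 0x1000, 0x800};
//     code_set.DataSegment() = {0x1800, base + 0x1800, 0x400};
//     code_set.entrypoint = base;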

} // namespace Kernel
40
src/core/hle/kernel/errors.h
Executable file
@@ -0,0 +1,40 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/result.h"

namespace Kernel {

// Confirmed Switch kernel error codes

constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59};
constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104};
constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105};
constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106};
constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108};
constexpr ResultCode ERR_INVALID_MEMORY_RANGE{ErrorModule::Kernel, 110};
constexpr ResultCode ERR_INVALID_THREAD_PRIORITY{ErrorModule::Kernel, 112};
constexpr ResultCode ERR_INVALID_PROCESSOR_ID{ErrorModule::Kernel, 113};
constexpr ResultCode ERR_INVALID_HANDLE{ErrorModule::Kernel, 114};
constexpr ResultCode ERR_INVALID_POINTER{ErrorModule::Kernel, 115};
constexpr ResultCode ERR_INVALID_COMBINATION{ErrorModule::Kernel, 116};
constexpr ResultCode RESULT_TIMEOUT{ErrorModule::Kernel, 117};
constexpr ResultCode ERR_SYNCHRONIZATION_CANCELED{ErrorModule::Kernel, 118};
constexpr ResultCode ERR_OUT_OF_RANGE{ErrorModule::Kernel, 119};
constexpr ResultCode ERR_INVALID_ENUM_VALUE{ErrorModule::Kernel, 120};
constexpr ResultCode ERR_NOT_FOUND{ErrorModule::Kernel, 121};
constexpr ResultCode ERR_BUSY{ErrorModule::Kernel, 122};
constexpr ResultCode ERR_SESSION_CLOSED_BY_REMOTE{ErrorModule::Kernel, 123};
constexpr ResultCode ERR_INVALID_STATE{ErrorModule::Kernel, 125};
constexpr ResultCode ERR_RESERVED_VALUE{ErrorModule::Kernel, 126};
constexpr ResultCode ERR_RESOURCE_LIMIT_EXCEEDED{ErrorModule::Kernel, 132};

} // namespace Kernel
52
src/core/hle/kernel/global_scheduler_context.cpp
Executable file
@@ -0,0 +1,52 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <mutex>

#include "common/assert.h"
#include "core/core.h"
#include "core/hle/kernel/global_scheduler_context.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"

namespace Kernel {

GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
    : kernel{kernel}, scheduler_lock{kernel} {}

GlobalSchedulerContext::~GlobalSchedulerContext() = default;

void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
    std::scoped_lock lock{global_list_guard};
    thread_list.push_back(std::move(thread));
}

void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
    std::scoped_lock lock{global_list_guard};
    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                      thread_list.end());
}

void GlobalSchedulerContext::PreemptThreads() {
    // The priority levels at which the global scheduler preempts threads every 10 ms. They are
    // ordered from Core 0 to Core 3.
    static constexpr std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities{
        59,
        59,
        59,
        63,
    };

    ASSERT(IsLocked());
    for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
        const u32 priority = preemption_priorities[core_id];
        kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
    }
}

bool GlobalSchedulerContext::IsLocked() const {
    return scheduler_lock.IsLockedByCurrentThread();
}

} // namespace Kernel
81
src/core/hle/kernel/global_scheduler_context.h
Executable file
@@ -0,0 +1,81 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <atomic>
#include <vector>

#include "common/common_types.h"
#include "common/spin_lock.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_priority_queue.h"
#include "core/hle/kernel/k_scheduler_lock.h"
#include "core/hle/kernel/thread.h"

namespace Kernel {

class KernelCore;
class SchedulerLock;

using KSchedulerPriorityQueue =
    KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
constexpr s32 HighestCoreMigrationAllowedPriority = 2;

class GlobalSchedulerContext final {
    friend class KScheduler;

public:
    using LockType = KAbstractSchedulerLock<KScheduler>;

    explicit GlobalSchedulerContext(KernelCore& kernel);
    ~GlobalSchedulerContext();

    /// Adds a new thread to the scheduler
    void AddThread(std::shared_ptr<Thread> thread);

    /// Removes a thread from the scheduler
    void RemoveThread(std::shared_ptr<Thread> thread);

    /// Returns a list of all threads managed by the scheduler
    [[nodiscard]] const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
        return thread_list;
    }

    /**
     * Rotates the scheduling queues of threads at a preemption priority and then does
     * some core rebalancing. Preemption priorities can be found in the array
     * 'preemption_priorities'.
     *
     * @note This operation happens every 10ms.
     */
    void PreemptThreads();

    /// Returns true if the global scheduler lock is acquired
    bool IsLocked() const;

    [[nodiscard]] LockType& SchedulerLock() {
        return scheduler_lock;
    }

    [[nodiscard]] const LockType& SchedulerLock() const {
        return scheduler_lock;
    }

private:
    friend class KScopedSchedulerLock;
    friend class KScopedSchedulerLockAndSleep;

    KernelCore& kernel;

    std::atomic_bool scheduler_update_needed{};
    KSchedulerPriorityQueue priority_queue;
    LockType scheduler_lock;

    /// Lists all thread ids that aren't deleted/etc.
    std::vector<std::shared_ptr<Thread>> thread_list;
    Common::SpinLock global_list_guard{};
};

} // namespace Kernel
127
src/core/hle/kernel/handle_table.cpp
Executable file
@@ -0,0 +1,127 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <utility>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"

namespace Kernel {
namespace {
constexpr u16 GetSlot(Handle handle) {
    return static_cast<u16>(handle >> 15);
}

constexpr u16 GetGeneration(Handle handle) {
    return static_cast<u16>(handle & 0x7FFF);
}
} // Anonymous namespace

HandleTable::HandleTable(KernelCore& kernel) : kernel{kernel} {
    Clear();
}

HandleTable::~HandleTable() = default;

ResultCode HandleTable::SetSize(s32 handle_table_size) {
    if (static_cast<u32>(handle_table_size) > MAX_COUNT) {
        LOG_ERROR(Kernel, "Handle table size {} is greater than {}", handle_table_size, MAX_COUNT);
        return ERR_OUT_OF_MEMORY;
    }

    // Values less than or equal to zero indicate to use the maximum allowable
    // size for the handle table in the actual kernel, so we ignore the given
    // value in that case, since we assume this by default unless this function
    // is called.
    if (handle_table_size > 0) {
        table_size = static_cast<u16>(handle_table_size);
    }

    return RESULT_SUCCESS;
}

ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) {
    DEBUG_ASSERT(obj != nullptr);

    const u16 slot = next_free_slot;
    if (slot >= table_size) {
        LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use.");
        return ERR_HANDLE_TABLE_FULL;
    }
    next_free_slot = generations[slot];

    const u16 generation = next_generation++;

    // Overflow count so it fits in the 15 bits dedicated to the generation in the handle.
    // Horizon OS uses zero to represent an invalid handle, so skip to 1.
    if (next_generation >= (1 << 15)) {
        next_generation = 1;
    }

    generations[slot] = generation;
    objects[slot] = std::move(obj);

    Handle handle = generation | (slot << 15);
    return MakeResult<Handle>(handle);
}
|
||||
|
||||
ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
|
||||
std::shared_ptr<Object> object = GetGeneric(handle);
|
||||
if (object == nullptr) {
|
||||
LOG_ERROR(Kernel, "Tried to duplicate invalid handle: {:08X}", handle);
|
||||
return ERR_INVALID_HANDLE;
|
||||
}
|
||||
return Create(std::move(object));
|
||||
}
|
||||
|
||||
ResultCode HandleTable::Close(Handle handle) {
|
||||
if (!IsValid(handle)) {
|
||||
LOG_ERROR(Kernel, "Handle is not valid! handle={:08X}", handle);
|
||||
return ERR_INVALID_HANDLE;
|
||||
}
|
||||
|
||||
const u16 slot = GetSlot(handle);
|
||||
|
||||
objects[slot] = nullptr;
|
||||
|
||||
generations[slot] = next_free_slot;
|
||||
next_free_slot = slot;
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
bool HandleTable::IsValid(Handle handle) const {
|
||||
const std::size_t slot = GetSlot(handle);
|
||||
const u16 generation = GetGeneration(handle);
|
||||
|
||||
return slot < table_size && objects[slot] != nullptr && generations[slot] == generation;
|
||||
}
|
||||
|
||||
std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
|
||||
if (handle == CurrentThread) {
|
||||
return SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
|
||||
} else if (handle == CurrentProcess) {
|
||||
return SharedFrom(kernel.CurrentProcess());
|
||||
}
|
||||
|
||||
if (!IsValid(handle)) {
|
||||
return nullptr;
|
||||
}
|
||||
return objects[GetSlot(handle)];
|
||||
}
|
||||
|
||||
void HandleTable::Clear() {
|
||||
for (u16 i = 0; i < table_size; ++i) {
|
||||
generations[i] = static_cast<u16>(i + 1);
|
||||
objects[i] = nullptr;
|
||||
}
|
||||
next_free_slot = 0;
|
||||
}
|
||||
|
||||
} // namespace Kernel
144
src/core/hle/kernel/handle_table.h
Executable file
@@ -0,0 +1,144 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <cstddef>
#include <memory>

#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"

namespace Kernel {

class KernelCore;

enum KernelHandle : Handle {
    InvalidHandle = 0,
    CurrentThread = 0xFFFF8000,
    CurrentProcess = 0xFFFF8001,
};

/**
 * This class allows the creation of Handles, which are references to objects that can be tested
 * for validity and looked up. Here they are used to pass references to kernel objects to/from the
 * emulated process. It has been designed so that it follows the same handle format and has
 * approximately the same restrictions as the handle manager in the CTR-OS.
 *
 * Handles contain two sub-fields: a slot index (bits 31:15) and a generation value (bits 14:0).
 * The slot index is used to index into the arrays in this class to access the data corresponding
 * to the Handle.
 *
 * To prevent accidental use of a freed Handle whose slot has already been reused, a global counter
 * is kept and incremented every time a Handle is created. This is the Handle's "generation". The
 * value of the counter is stored into the Handle as well as in the handle table (in the
 * "generations" array). When looking up a handle, the Handle's generation must match with the
 * value stored on the class, otherwise the Handle is considered invalid.
 *
 * To find free slots when allocating a Handle without needing to scan the entire object array, the
 * generations field of unallocated slots is re-purposed as a linked list of indices to free slots.
 * When a Handle is created, an index is popped off the list and used for the new Handle. When it
 * is destroyed, it is again pushed onto the list to be re-used by the next allocation. It is
 * likely that this allocation strategy differs from the one used in CTR-OS, but this hasn't been
 * verified and isn't likely to cause any problems.
 */
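//
// A compact sketch (not part of this commit) of the free-list behaviour described
// above, with the generations entries of empty slots acting as the list links:
//
//   generations = {1, 2, 3, ...}; next_free_slot = 0;   // state after Clear()
//   slot = next_free_slot;                              // Create(): pop the head
//   next_free_slot = generations[slot];
//   ...
//   generations[slot] = next_free_slot;                 // Close(): push it back
//   next_free_slot = slot;                              // freed slot is re-used first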
class HandleTable final : NonCopyable {
public:
    /// This is the maximum limit of handles allowed per process in Horizon
    static constexpr std::size_t MAX_COUNT = 1024;

    explicit HandleTable(KernelCore& kernel);
    ~HandleTable();

    /**
     * Sets the number of handles that may be in use at one time
     * for this handle table.
     *
     * @param handle_table_size The desired size to limit the handle table to.
     *
     * @returns an error code indicating if initialization was successful.
     *          If initialization was not successful, then ERR_OUT_OF_MEMORY
     *          will be returned.
     *
     * @pre handle_table_size must be within the range [0, 1024]
     */
    ResultCode SetSize(s32 handle_table_size);

    /**
     * Allocates a handle for the given object.
     * @return The created Handle or one of the following errors:
     *         - `ERR_HANDLE_TABLE_FULL`: the maximum number of handles has been exceeded.
     */
    ResultVal<Handle> Create(std::shared_ptr<Object> obj);

    /**
     * Returns a new handle that points to the same object as the passed in handle.
     * @return The duplicated Handle or one of the following errors:
     *         - `ERR_INVALID_HANDLE`: an invalid handle was passed in.
     *         - Any errors returned by `Create()`.
     */
    ResultVal<Handle> Duplicate(Handle handle);

    /**
     * Closes a handle, removing it from the table and decreasing the object's ref-count.
     * @return `RESULT_SUCCESS` or one of the following errors:
     *         - `ERR_INVALID_HANDLE`: an invalid handle was passed in.
     */
    ResultCode Close(Handle handle);

    /// Checks if a handle is valid and points to an existing object.
    bool IsValid(Handle handle) const;

    /**
     * Looks up a handle.
     * @return Pointer to the looked-up object, or `nullptr` if the handle is not valid.
     */
    std::shared_ptr<Object> GetGeneric(Handle handle) const;

    /**
     * Looks up a handle while verifying its type.
     * @return Pointer to the looked-up object, or `nullptr` if the handle is not valid or its
     *         type differs from the requested one.
     */
    template <class T>
    std::shared_ptr<T> Get(Handle handle) const {
        return DynamicObjectCast<T>(GetGeneric(handle));
    }

    /// Closes all handles held in this table.
    void Clear();

private:
    /// Stores the Object referenced by the handle or null if the slot is empty.
    std::array<std::shared_ptr<Object>, MAX_COUNT> objects;

    /**
     * The value of `next_generation` when the handle was created, used to check for validity. For
     * empty slots, contains the index of the next free slot in the list.
     */
    std::array<u16, MAX_COUNT> generations;

    /**
     * The limited size of the handle table. This can be specified by process
     * capabilities in order to restrict the overall number of handles that
     * can be created in a process instance.
     */
    u16 table_size = static_cast<u16>(MAX_COUNT);

    /**
     * Global counter of the number of created handles. Stored in `generations` when a handle is
     * created, and wraps around to 1 when it hits 0x8000.
     */
    u16 next_generation = 1;

    /// Head of the free slots linked list.
    u16 next_free_slot = 0;

    /// Underlying kernel instance that this handle table operates under.
    KernelCore& kernel;
};

} // namespace Kernel
392
src/core/hle/kernel/hle_ipc.cpp
Executable file
@@ -0,0 +1,392 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <array>
#include <sstream>
#include <utility>

#include <boost/range/algorithm_ext/erase.hpp>

#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/kernel/writable_event.h"
#include "core/memory.h"

namespace Kernel {

SessionRequestHandler::SessionRequestHandler() = default;

SessionRequestHandler::~SessionRequestHandler() = default;

void SessionRequestHandler::ClientConnected(std::shared_ptr<ServerSession> server_session) {
    server_session->SetHleHandler(shared_from_this());
    connected_sessions.push_back(std::move(server_session));
}

void SessionRequestHandler::ClientDisconnected(
    const std::shared_ptr<ServerSession>& server_session) {
    server_session->SetHleHandler(nullptr);
    boost::range::remove_erase(connected_sessions, server_session);
}

HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
                                     std::shared_ptr<ServerSession> server_session,
                                     std::shared_ptr<Thread> thread)
    : server_session(std::move(server_session)),
      thread(std::move(thread)), kernel{kernel}, memory{memory} {
    cmd_buf[0] = 0;
}

HLERequestContext::~HLERequestContext() = default;

void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf,
                                           bool incoming) {
    IPC::RequestParser rp(src_cmdbuf);
    command_header = rp.PopRaw<IPC::CommandHeader>();

    if (command_header->type == IPC::CommandType::Close) {
        // Close does not populate the rest of the IPC header
        return;
    }

    // If a handle descriptor is present, parse it
    if (command_header->enable_handle_descriptor) {
        handle_descriptor_header = rp.PopRaw<IPC::HandleDescriptorHeader>();
        if (handle_descriptor_header->send_current_pid) {
            rp.Skip(2, false);
        }
        if (incoming) {
            // Populate the object lists with the data in the IPC request.
            for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_copy; ++handle) {
                copy_objects.push_back(handle_table.GetGeneric(rp.Pop<Handle>()));
            }
            for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_move; ++handle) {
                move_objects.push_back(handle_table.GetGeneric(rp.Pop<Handle>()));
            }
        } else {
            // For responses we just ignore the handles, they're empty and will be populated when
            // translating the response.
            rp.Skip(handle_descriptor_header->num_handles_to_copy, false);
            rp.Skip(handle_descriptor_header->num_handles_to_move, false);
        }
    }

    for (unsigned i = 0; i < command_header->num_buf_x_descriptors; ++i) {
        buffer_x_descriptors.push_back(rp.PopRaw<IPC::BufferDescriptorX>());
    }
    for (unsigned i = 0; i < command_header->num_buf_a_descriptors; ++i) {
        buffer_a_descriptors.push_back(rp.PopRaw<IPC::BufferDescriptorABW>());
    }
    for (unsigned i = 0; i < command_header->num_buf_b_descriptors; ++i) {
        buffer_b_descriptors.push_back(rp.PopRaw<IPC::BufferDescriptorABW>());
    }
    for (unsigned i = 0; i < command_header->num_buf_w_descriptors; ++i) {
        buffer_w_descriptors.push_back(rp.PopRaw<IPC::BufferDescriptorABW>());
    }

    buffer_c_offset = rp.GetCurrentOffset() + command_header->data_size;

    // Padding to align to 16 bytes
    rp.AlignWithPadding();

    if (Session()->IsDomain() && ((command_header->type == IPC::CommandType::Request ||
                                   command_header->type == IPC::CommandType::RequestWithContext) ||
                                  !incoming)) {
        // For incoming messages, only the Request/RequestWithContext command types carry a
        // domain header; all outgoing domain messages carry one.
        if (incoming || domain_message_header) {
            domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>();
        } else {
            if (Session()->IsDomain()) {
                LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
            }
        }
    }

    data_payload_header = rp.PopRaw<IPC::DataPayloadHeader>();

    data_payload_offset = rp.GetCurrentOffset();

    if (domain_message_header && domain_message_header->command ==
                                     IPC::DomainMessageHeader::CommandType::CloseVirtualHandle) {
        // The CloseVirtualHandle command does not have an SFC* header or any data
        return;
    }

    if (incoming) {
        ASSERT(data_payload_header->magic == Common::MakeMagic('S', 'F', 'C', 'I'));
    } else {
        ASSERT(data_payload_header->magic == Common::MakeMagic('S', 'F', 'C', 'O'));
    }

    rp.SetCurrentOffset(buffer_c_offset);

    // For inline buffers, the response data is written directly to buffer_c_offset,
    // and in this case we don't have any BufferDescriptorC on the request.
    if (command_header->buf_c_descriptor_flags >
        IPC::CommandHeader::BufferDescriptorCFlag::InlineDescriptor) {
        if (command_header->buf_c_descriptor_flags ==
            IPC::CommandHeader::BufferDescriptorCFlag::OneDescriptor) {
            buffer_c_descriptors.push_back(rp.PopRaw<IPC::BufferDescriptorC>());
        } else {
            const unsigned num_buf_c_descriptors =
                static_cast<unsigned>(command_header->buf_c_descriptor_flags.Value()) - 2;

            // This is used to detect possible underflows, in case something is broken
            // with the two ifs above and the flags value is == 0 || == 1.
            ASSERT(num_buf_c_descriptors < 14);

            for (unsigned i = 0; i < num_buf_c_descriptors; ++i) {
                buffer_c_descriptors.push_back(rp.PopRaw<IPC::BufferDescriptorC>());
            }
        }
    }

    rp.SetCurrentOffset(data_payload_offset);

    command = rp.Pop<u32_le>();
    rp.Skip(1, false); // The command is actually a u64, but we don't use the high part.
}
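
// A sketch (not part of this commit) of the 'SFCI'/'SFCO' magic check above,
// assuming Common::MakeMagic packs its four characters little-endian with the
// first character in the lowest byte. MakeTestMagic is illustrative only.
constexpr u32 MakeTestMagic(char a, char b, char c, char d) {
    return static_cast<u32>(a) | (static_cast<u32>(b) << 8) | (static_cast<u32>(c) << 16) |
           (static_cast<u32>(d) << 24);
}
static_assert(MakeTestMagic('S', 'F', 'C', 'I') == 0x49434653); // incoming payloads
static_assert(MakeTestMagic('S', 'F', 'C', 'O') == 0x4F434653); // outgoing payloads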

ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
                                                                u32_le* src_cmdbuf) {
    ParseCommandBuffer(handle_table, src_cmdbuf, true);
    if (command_header->type == IPC::CommandType::Close) {
        // Close does not populate the rest of the IPC header
        return RESULT_SUCCESS;
    }

    // The data_size already includes the payload header, the padding and the domain header.
    std::size_t size = data_payload_offset + command_header->data_size -
                       sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
    if (domain_message_header) {
        size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
    }
    std::copy_n(src_cmdbuf, size, cmd_buf.begin());
    return RESULT_SUCCESS;
}

ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
    auto& owner_process = *thread.GetOwnerProcess();
    auto& handle_table = owner_process.GetHandleTable();

    std::array<u32, IPC::COMMAND_BUFFER_LENGTH> dst_cmdbuf;
    memory.ReadBlock(owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(),
                     dst_cmdbuf.size() * sizeof(u32));

    // The header was already built in the internal command buffer. Attempt to parse it to verify
    // the integrity and then copy it over to the target command buffer.
    ParseCommandBuffer(handle_table, cmd_buf.data(), false);

    // The data_size already includes the payload header, the padding and the domain header.
    std::size_t size = data_payload_offset + command_header->data_size -
                       sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
    if (domain_message_header) {
        size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
    }

    std::copy_n(cmd_buf.begin(), size, dst_cmdbuf.data());

    if (command_header->enable_handle_descriptor) {
        ASSERT_MSG(!move_objects.empty() || !copy_objects.empty(),
                   "Handle descriptor bit set but no handles to translate");
        // We write the translated handles at a specific offset in the command buffer; this space
        // was already reserved when writing the header.
        std::size_t current_offset =
            (sizeof(IPC::CommandHeader) + sizeof(IPC::HandleDescriptorHeader)) / sizeof(u32);
        ASSERT_MSG(!handle_descriptor_header->send_current_pid, "Sending PID is not implemented");

        ASSERT(copy_objects.size() == handle_descriptor_header->num_handles_to_copy);
        ASSERT(move_objects.size() == handle_descriptor_header->num_handles_to_move);

        // We don't make a distinction between copy and move handles when translating since HLE
        // services don't deal with handles directly. However, the guest applications might check
        // for specific values in each of these descriptors.
        for (auto& object : copy_objects) {
            ASSERT(object != nullptr);
            dst_cmdbuf[current_offset++] = handle_table.Create(object).Unwrap();
        }

        for (auto& object : move_objects) {
            ASSERT(object != nullptr);
            dst_cmdbuf[current_offset++] = handle_table.Create(object).Unwrap();
        }
    }

    // TODO(Subv): Translate the X/A/B/W buffers.

    if (Session()->IsDomain() && domain_message_header) {
        ASSERT(domain_message_header->num_objects == domain_objects.size());
        // Write the domain objects to the command buffer; these go after the raw untranslated
        // data.
        // TODO(Subv): This completely ignores C buffers.
        std::size_t domain_offset = size - domain_message_header->num_objects;

        for (const auto& object : domain_objects) {
            server_session->AppendDomainRequestHandler(object);
            dst_cmdbuf[domain_offset++] =
                static_cast<u32_le>(server_session->NumDomainRequestHandlers());
        }
    }

    // Copy the translated command buffer back into the thread's command buffer area.
    memory.WriteBlock(owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(),
                      dst_cmdbuf.size() * sizeof(u32));

    return RESULT_SUCCESS;
}

std::vector<u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
    std::vector<u8> buffer{};
    const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
                           BufferDescriptorA()[buffer_index].Size()};

    if (is_buffer_a) {
        ASSERT_OR_EXECUTE_MSG(
            BufferDescriptorA().size() > buffer_index, { return buffer; },
            "BufferDescriptorA invalid buffer_index {}", buffer_index);
        buffer.resize(BufferDescriptorA()[buffer_index].Size());
        memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), buffer.size());
    } else {
        ASSERT_OR_EXECUTE_MSG(
            BufferDescriptorX().size() > buffer_index, { return buffer; },
            "BufferDescriptorX invalid buffer_index {}", buffer_index);
        buffer.resize(BufferDescriptorX()[buffer_index].Size());
        memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), buffer.size());
    }

    return buffer;
}

std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
                                           std::size_t buffer_index) const {
    if (size == 0) {
        LOG_WARNING(Core, "Skipping write of empty buffer");
        return 0;
    }

    const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
                           BufferDescriptorB()[buffer_index].Size()};
    const std::size_t buffer_size{GetWriteBufferSize(buffer_index)};
    if (size > buffer_size) {
        LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
                     buffer_size);
        size = buffer_size; // TODO(bunnei): This needs to be HW tested
    }

    if (is_buffer_b) {
        ASSERT_OR_EXECUTE_MSG(
            BufferDescriptorB().size() > buffer_index &&
                BufferDescriptorB()[buffer_index].Size() >= size,
            { return 0; }, "BufferDescriptorB is invalid, index={}, size={}", buffer_index, size);
        memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size);
    } else {
        ASSERT_OR_EXECUTE_MSG(
            BufferDescriptorC().size() > buffer_index &&
                BufferDescriptorC()[buffer_index].Size() >= size,
            { return 0; }, "BufferDescriptorC is invalid, index={}, size={}", buffer_index, size);
        memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size);
    }

    return size;
}

std::size_t HLERequestContext::GetReadBufferSize(std::size_t buffer_index) const {
    const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
                           BufferDescriptorA()[buffer_index].Size()};
    if (is_buffer_a) {
        ASSERT_OR_EXECUTE_MSG(
            BufferDescriptorA().size() > buffer_index, { return 0; },
            "BufferDescriptorA invalid buffer_index {}", buffer_index);
        return BufferDescriptorA()[buffer_index].Size();
    } else {
        ASSERT_OR_EXECUTE_MSG(
            BufferDescriptorX().size() > buffer_index, { return 0; },
            "BufferDescriptorX invalid buffer_index {}", buffer_index);
        return BufferDescriptorX()[buffer_index].Size();
    }
}

std::size_t HLERequestContext::GetWriteBufferSize(std::size_t buffer_index) const {
    const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
                           BufferDescriptorB()[buffer_index].Size()};
    if (is_buffer_b) {
        ASSERT_OR_EXECUTE_MSG(
            BufferDescriptorB().size() > buffer_index, { return 0; },
            "BufferDescriptorB invalid buffer_index {}", buffer_index);
        return BufferDescriptorB()[buffer_index].Size();
    } else {
        ASSERT_OR_EXECUTE_MSG(
            BufferDescriptorC().size() > buffer_index, { return 0; },
            "BufferDescriptorC invalid buffer_index {}", buffer_index);
        return BufferDescriptorC()[buffer_index].Size();
    }
}
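
// The selection rule shared by WriteBuffer() and GetWriteBufferSize() above, as a
// compact sketch (not part of this commit): prefer a non-empty B descriptor at the
// requested index, otherwise fall back to the C descriptor list.
//
//   const bool use_b = BufferDescriptorB().size() > index &&
//                      BufferDescriptorB()[index].Size() != 0;
//   const auto& descriptors = use_b ? BufferDescriptorB() : BufferDescriptorC();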

std::string HLERequestContext::Description() const {
    if (!command_header) {
        return "No command header available";
    }
    std::ostringstream s;
    s << "IPC::CommandHeader: Type:" << static_cast<u32>(command_header->type.Value());
    s << ", X(Pointer):" << command_header->num_buf_x_descriptors;
    if (command_header->num_buf_x_descriptors) {
        s << '[';
        for (u64 i = 0; i < command_header->num_buf_x_descriptors; ++i) {
            s << "0x" << std::hex << BufferDescriptorX()[i].Size();
            if (i < command_header->num_buf_x_descriptors - 1) {
                s << ", ";
            }
        }
        s << ']';
    }
    s << ", A(Send):" << command_header->num_buf_a_descriptors;
    if (command_header->num_buf_a_descriptors) {
        s << '[';
        for (u64 i = 0; i < command_header->num_buf_a_descriptors; ++i) {
            s << "0x" << std::hex << BufferDescriptorA()[i].Size();
            if (i < command_header->num_buf_a_descriptors - 1) {
                s << ", ";
            }
        }
        s << ']';
    }
    s << ", B(Receive):" << command_header->num_buf_b_descriptors;
    if (command_header->num_buf_b_descriptors) {
        s << '[';
        for (u64 i = 0; i < command_header->num_buf_b_descriptors; ++i) {
            s << "0x" << std::hex << BufferDescriptorB()[i].Size();
            if (i < command_header->num_buf_b_descriptors - 1) {
                s << ", ";
            }
        }
        s << ']';
    }
    s << ", C(ReceiveList):" << BufferDescriptorC().size();
    if (!BufferDescriptorC().empty()) {
        s << '[';
        for (u64 i = 0; i < BufferDescriptorC().size(); ++i) {
            s << "0x" << std::hex << BufferDescriptorC()[i].Size();
            if (i < BufferDescriptorC().size() - 1) {
                s << ", ";
            }
        }
        s << ']';
    }
    s << ", data_size:" << command_header->data_size.Value();

    return s.str();
}
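
// For reference, a request with one X and one B descriptor might describe itself
// roughly as follows (illustrative values only; the exact text follows the stream
// insertions above):
//
//   IPC::CommandHeader: Type:4, X(Pointer):1[0x10], A(Send):0, B(Receive):1[0x1000],
//   C(ReceiveList):0, data_size:8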

} // namespace Kernel
310
src/core/hle/kernel/hle_ipc.h
Executable file
@@ -0,0 +1,310 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <vector>
#include <boost/container/small_vector.hpp>
#include "common/common_types.h"
#include "common/concepts.h"
#include "common/swap.h"
#include "core/hle/ipc.h"
#include "core/hle/kernel/object.h"

union ResultCode;

namespace Core::Memory {
class Memory;
}

namespace IPC {
class ResponseBuilder;
}

namespace Service {
class ServiceFrameworkBase;
}

namespace Kernel {

class Domain;
class HandleTable;
class HLERequestContext;
class KernelCore;
class Process;
class ServerSession;
class Thread;
class ReadableEvent;
class WritableEvent;

enum class ThreadWakeupReason;

/**
 * Interface implemented by HLE Session handlers.
 * This can be provided to a ServerSession in order to hook into several relevant events
 * (such as a new connection or a SyncRequest) so they can be implemented in the emulator.
 */
class SessionRequestHandler : public std::enable_shared_from_this<SessionRequestHandler> {
public:
    SessionRequestHandler();
    virtual ~SessionRequestHandler();

    /**
     * Handles a sync request from the emulated application.
     * @param server_session The ServerSession that was triggered for this sync request;
     * it should be used to differentiate which client (as in ClientSession) we're answering to.
     * TODO(Subv): Use a wrapper structure to hold all the information relevant to
     * this request (ServerSession, Originator thread, Translated command buffer, etc).
     * @returns ResultCode the result code of the translate operation.
     */
    virtual ResultCode HandleSyncRequest(Kernel::HLERequestContext& context) = 0;

    /**
     * Signals that a client has just connected to this HLE handler and keeps the
     * associated ServerSession alive for the duration of the connection.
     * @param server_session Owning pointer to the ServerSession associated with the connection.
     */
    void ClientConnected(std::shared_ptr<ServerSession> server_session);

    /**
     * Signals that a client has just disconnected from this HLE handler and releases the
     * associated ServerSession.
     * @param server_session ServerSession associated with the connection.
     */
    void ClientDisconnected(const std::shared_ptr<ServerSession>& server_session);

protected:
    /// List of sessions that are connected to this handler.
    /// A ServerSession whose server endpoint is an HLE implementation is kept alive by this list
    /// for the duration of the connection.
    std::vector<std::shared_ptr<ServerSession>> connected_sessions;
};

/**
 * Class containing information about an in-flight IPC request being handled by an HLE service
 * implementation. Services should avoid using old global APIs (e.g. Kernel::GetCommandBuffer()) and
 * when possible use the APIs in this class to service the request.
 *
 * HLE handle protocol
 * ===================
 *
 * To avoid needing HLE services to keep a separate handle table, or having to directly modify the
 * requester's table, a tweaked protocol is used to receive and send handles in requests. The kernel
 * will decode the incoming handles into object pointers and insert an id in the buffer where the
 * handle would normally be. The service then calls GetIncomingHandle() with that id to get the
 * pointer to the object. Similarly, instead of inserting a handle into the command buffer, the
 * service calls AddOutgoingHandle() and stores the returned id where the handle would normally go.
 *
 * The end result is similar to just giving services their own real handle tables, but since these
 * ids are local to a specific context, it avoids requiring services to manage handles for objects
 * across multiple calls and ensuring that unneeded handles are cleaned up.
 */
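//
// A hypothetical miniature of that protocol (names and call sites illustrative,
// not the emulator's exact API):
//
//   const u32 id = ctx.AddOutgoingHandle(std::move(object)); // service side
//   rb.PushRaw(id);                                          // where a handle would go
//   auto obj = ctx.GetIncomingHandle(id_read_from_buffer);   // resolve an incoming id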
class HLERequestContext {
public:
    explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
                               std::shared_ptr<ServerSession> session,
                               std::shared_ptr<Thread> thread);
    ~HLERequestContext();

    /// Returns a pointer to the IPC command buffer for this request.
    u32* CommandBuffer() {
        return cmd_buf.data();
    }

    /**
     * Returns the session through which this request was made. This can be used as a map key to
     * access per-client data on services.
     */
    const std::shared_ptr<Kernel::ServerSession>& Session() const {
        return server_session;
    }

    using WakeupCallback = std::function<void(
        std::shared_ptr<Thread> thread, HLERequestContext& context, ThreadWakeupReason reason)>;

    /// Populates this context with data from the requesting process/thread.
    ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
                                                 u32_le* src_cmdbuf);

    /// Writes data from this context back to the requesting process/thread.
    ResultCode WriteToOutgoingCommandBuffer(Thread& thread);

    u32_le GetCommand() const {
        return command;
    }

    IPC::CommandType GetCommandType() const {
        return command_header->type;
    }

    unsigned GetDataPayloadOffset() const {
        return data_payload_offset;
    }

    const std::vector<IPC::BufferDescriptorX>& BufferDescriptorX() const {
        return buffer_x_descriptors;
    }

    const std::vector<IPC::BufferDescriptorABW>& BufferDescriptorA() const {
        return buffer_a_descriptors;
    }

    const std::vector<IPC::BufferDescriptorABW>& BufferDescriptorB() const {
        return buffer_b_descriptors;
    }

    const std::vector<IPC::BufferDescriptorC>& BufferDescriptorC() const {
        return buffer_c_descriptors;
    }

    const IPC::DomainMessageHeader& GetDomainMessageHeader() const {
        return domain_message_header.value();
    }

    bool HasDomainMessageHeader() const {
        return domain_message_header.has_value();
    }

    /// Helper function to read a buffer using the appropriate buffer descriptor
    std::vector<u8> ReadBuffer(std::size_t buffer_index = 0) const;

    /// Helper function to write a buffer using the appropriate buffer descriptor
    std::size_t WriteBuffer(const void* buffer, std::size_t size,
                            std::size_t buffer_index = 0) const;

    /**
     * Helper function to write a buffer using the appropriate buffer descriptor
     *
     * @tparam T an arbitrary container that satisfies the
     *         ContiguousContainer concept in the C++ standard library or a trivially copyable
     *         type.
     *
     * @param data The container/data to write into a buffer.
     * @param buffer_index The buffer in particular to write to.
     */
    template <typename T, typename = std::enable_if_t<!std::is_pointer_v<T>>>
    std::size_t WriteBuffer(const T& data, std::size_t buffer_index = 0) const {
        if constexpr (Common::IsSTLContainer<T>) {
            using ContiguousType = typename T::value_type;
            static_assert(std::is_trivially_copyable_v<ContiguousType>,
                          "Container to WriteBuffer must contain trivially copyable objects");
            return WriteBuffer(std::data(data), std::size(data) * sizeof(ContiguousType),
                               buffer_index);
        } else {
            static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
            return WriteBuffer(&data, sizeof(T), buffer_index);
        }
    }
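
    // Usage sketch (illustrative; ctx stands for an HLERequestContext): both calls
    // below resolve through the template overload above.
    //
    //   std::vector<u8> payload(0x100);
    //   ctx.WriteBuffer(payload);    // container path: writes size() * sizeof(u8) bytes
    //   const u32 result = 0;
    //   ctx.WriteBuffer(result);     // trivially copyable path: writes sizeof(u32) bytes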

    /// Helper function to get the size of the input buffer
    std::size_t GetReadBufferSize(std::size_t buffer_index = 0) const;

    /// Helper function to get the size of the output buffer
    std::size_t GetWriteBufferSize(std::size_t buffer_index = 0) const;

    template <typename T>
    std::shared_ptr<T> GetCopyObject(std::size_t index) {
        return DynamicObjectCast<T>(copy_objects.at(index));
    }

    template <typename T>
    std::shared_ptr<T> GetMoveObject(std::size_t index) {
        return DynamicObjectCast<T>(move_objects.at(index));
    }

    void AddMoveObject(std::shared_ptr<Object> object) {
        move_objects.emplace_back(std::move(object));
    }

    void AddCopyObject(std::shared_ptr<Object> object) {
        copy_objects.emplace_back(std::move(object));
    }

    void AddDomainObject(std::shared_ptr<SessionRequestHandler> object) {
        domain_objects.emplace_back(std::move(object));
    }

    template <typename T>
    std::shared_ptr<T> GetDomainRequestHandler(std::size_t index) const {
        return std::static_pointer_cast<T>(domain_request_handlers.at(index));
    }

    void SetDomainRequestHandlers(
        const std::vector<std::shared_ptr<SessionRequestHandler>>& handlers) {
        domain_request_handlers = handlers;
    }

    /// Clears the list of objects so that no lingering objects are written accidentally to the
    /// response buffer.
    void ClearIncomingObjects() {
        move_objects.clear();
        copy_objects.clear();
        domain_objects.clear();
    }

    std::size_t NumMoveObjects() const {
        return move_objects.size();
    }

    std::size_t NumCopyObjects() const {
        return copy_objects.size();
    }

    std::size_t NumDomainObjects() const {
        return domain_objects.size();
    }

    std::string Description() const;

    Thread& GetThread() {
        return *thread;
    }

    const Thread& GetThread() const {
        return *thread;
    }

    bool IsThreadWaiting() const {
        return is_thread_waiting;
    }

private:
    friend class IPC::ResponseBuilder;

    void ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, bool incoming);

    std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
    std::shared_ptr<Kernel::ServerSession> server_session;
    std::shared_ptr<Thread> thread;
    // TODO(yuriks): Check common usage of this and optimize size accordingly
    boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects;
    boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects;
    boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects;

    std::optional<IPC::CommandHeader> command_header;
    std::optional<IPC::HandleDescriptorHeader> handle_descriptor_header;
    std::optional<IPC::DataPayloadHeader> data_payload_header;
    std::optional<IPC::DomainMessageHeader> domain_message_header;
    std::vector<IPC::BufferDescriptorX> buffer_x_descriptors;
    std::vector<IPC::BufferDescriptorABW> buffer_a_descriptors;
    std::vector<IPC::BufferDescriptorABW> buffer_b_descriptors;
    std::vector<IPC::BufferDescriptorABW> buffer_w_descriptors;
    std::vector<IPC::BufferDescriptorC> buffer_c_descriptors;

    unsigned data_payload_offset{};
    unsigned buffer_c_offset{};
    u32_le command{};

    std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers;
    bool is_thread_waiting{};

    KernelCore& kernel;
    Core::Memory::Memory& memory;
};

} // namespace Kernel
58
src/core/hle/kernel/k_affinity_mask.h
Executable file
@@ -0,0 +1,58 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/assert.h"
#include "common/common_types.h"
#include "core/hardware_properties.h"

namespace Kernel {

class KAffinityMask {
public:
    constexpr KAffinityMask() = default;

    [[nodiscard]] constexpr u64 GetAffinityMask() const {
        return this->mask;
    }

    constexpr void SetAffinityMask(u64 new_mask) {
        ASSERT((new_mask & ~AllowedAffinityMask) == 0);
        this->mask = new_mask;
    }

    [[nodiscard]] constexpr bool GetAffinity(s32 core) const {
        return this->mask & GetCoreBit(core);
    }

    constexpr void SetAffinity(s32 core, bool set) {
        ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));

        if (set) {
            this->mask |= GetCoreBit(core);
        } else {
            this->mask &= ~GetCoreBit(core);
        }
    }

    constexpr void SetAll() {
        this->mask = AllowedAffinityMask;
    }

private:
    [[nodiscard]] static constexpr u64 GetCoreBit(s32 core) {
        ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
        return (1ULL << core);
    }

    static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1;

    u64 mask{};
};

} // namespace Kernel
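
// Usage sketch for KAffinityMask (illustrative, assuming at least three CPU cores):
//
//   Kernel::KAffinityMask mask;
//   mask.SetAffinity(0, true);
//   mask.SetAffinity(2, true);
//   ASSERT(mask.GetAffinityMask() == 0b101); // bits 0 and 2 set
//   mask.SetAffinity(2, false);
//   ASSERT(!mask.GetAffinity(2));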
449
src/core/hle/kernel/k_priority_queue.h
Executable file
@@ -0,0 +1,449 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include <array>

#include "common/assert.h"
#include "common/bit_set.h"
#include "common/bit_util.h"
#include "common/common_types.h"

namespace Kernel {

class Thread;

template <typename T>
concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T& t) {
    { t.GetAffinityMask() } -> std::convertible_to<u64>;
    { t.SetAffinityMask(std::declval<u64>()) };

    { t.GetAffinity(std::declval<int32_t>()) } -> std::same_as<bool>;
    { t.SetAffinity(std::declval<int32_t>(), std::declval<bool>()) };
    { t.SetAll() };
};

template <typename T>
concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T& t) {
    { typename T::QueueEntry() };
    { (typename T::QueueEntry()).Initialize() };
    { (typename T::QueueEntry()).SetPrev(std::addressof(t)) };
    { (typename T::QueueEntry()).SetNext(std::addressof(t)) };
    { (typename T::QueueEntry()).GetNext() } -> std::same_as<T*>;
    { (typename T::QueueEntry()).GetPrev() } -> std::same_as<T*>;
    { t.GetPriorityQueueEntry(std::declval<s32>()) } -> std::same_as<typename T::QueueEntry&>;

    { t.GetAffinityMask() };
    { typename std::remove_cvref<decltype(t.GetAffinityMask())>::type() } ->
        KPriorityQueueAffinityMask;

    { t.GetActiveCore() } -> std::convertible_to<s32>;
    { t.GetPriority() } -> std::convertible_to<s32>;
};

template <typename Member, size_t _NumCores, int LowestPriority, int HighestPriority>
requires KPriorityQueueMember<Member> class KPriorityQueue {
public:
    using AffinityMaskType = typename std::remove_cv_t<
        typename std::remove_reference<decltype(std::declval<Member>().GetAffinityMask())>::type>;

    static_assert(LowestPriority >= 0);
    static_assert(HighestPriority >= 0);
    static_assert(LowestPriority >= HighestPriority);
    static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1;
    static constexpr size_t NumCores = _NumCores;

    static constexpr bool IsValidCore(s32 core) {
        return 0 <= core && core < static_cast<s32>(NumCores);
    }

    static constexpr bool IsValidPriority(s32 priority) {
        return HighestPriority <= priority && priority <= LowestPriority + 1;
    }

private:
    using Entry = typename Member::QueueEntry;

public:
    class KPerCoreQueue {
    private:
        std::array<Entry, NumCores> root{};

    public:
        constexpr KPerCoreQueue() {
            for (auto& per_core_root : root) {
                per_core_root.Initialize();
            }
        }

        constexpr bool PushBack(s32 core, Member* member) {
            // Get the entry associated with the member.
            Entry& member_entry = member->GetPriorityQueueEntry(core);

            // Get the entry associated with the end of the queue.
            Member* tail = this->root[core].GetPrev();
            Entry& tail_entry =
                (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core];

            // Link the entries.
            member_entry.SetPrev(tail);
            member_entry.SetNext(nullptr);
            tail_entry.SetNext(member);
            this->root[core].SetPrev(member);

            return tail == nullptr;
        }

        constexpr bool PushFront(s32 core, Member* member) {
            // Get the entry associated with the member.
            Entry& member_entry = member->GetPriorityQueueEntry(core);

            // Get the entry associated with the front of the queue.
            Member* head = this->root[core].GetNext();
            Entry& head_entry =
                (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core];

            // Link the entries.
            member_entry.SetPrev(nullptr);
            member_entry.SetNext(head);
            head_entry.SetPrev(member);
            this->root[core].SetNext(member);

            return (head == nullptr);
        }

        constexpr bool Remove(s32 core, Member* member) {
            // Get the entry associated with the member.
            Entry& member_entry = member->GetPriorityQueueEntry(core);

            // Get the entries associated with next and prev.
            Member* prev = member_entry.GetPrev();
            Member* next = member_entry.GetNext();
            Entry& prev_entry =
                (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core];
            Entry& next_entry =
                (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core];

            // Unlink.
            prev_entry.SetNext(next);
            next_entry.SetPrev(prev);

            return (this->GetFront(core) == nullptr);
        }

        constexpr Member* GetFront(s32 core) const {
            return this->root[core].GetNext();
        }
    };

    class KPriorityQueueImpl {
    public:
        constexpr KPriorityQueueImpl() = default;

        constexpr void PushBack(s32 priority, s32 core, Member* member) {
            ASSERT(IsValidCore(core));
            ASSERT(IsValidPriority(priority));

            if (priority > LowestPriority) {
                return;
            }

            if (this->queues[priority].PushBack(core, member)) {
                this->available_priorities[core].SetBit(priority);
            }
        }

        constexpr void PushFront(s32 priority, s32 core, Member* member) {
            ASSERT(IsValidCore(core));
            ASSERT(IsValidPriority(priority));

            if (priority > LowestPriority) {
                return;
            }

            if (this->queues[priority].PushFront(core, member)) {
                this->available_priorities[core].SetBit(priority);
            }
        }

        constexpr void Remove(s32 priority, s32 core, Member* member) {
            ASSERT(IsValidCore(core));
            ASSERT(IsValidPriority(priority));

            if (priority > LowestPriority) {
                return;
            }

            if (this->queues[priority].Remove(core, member)) {
                this->available_priorities[core].ClearBit(priority);
            }
        }

        constexpr Member* GetFront(s32 core) const {
            ASSERT(IsValidCore(core));

            const s32 priority =
                static_cast<s32>(this->available_priorities[core].CountLeadingZero());
            if (priority <= LowestPriority) {
                return this->queues[priority].GetFront(core);
            } else {
                return nullptr;
            }
        }
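
        // A standalone sketch (not part of this commit) of the bitmap lookup above.
        // BitSet64::CountLeadingZero() returns the first non-empty priority under
        // that type's own bit ordering; the same idea, expressed with C++20 <bit>
        // where priority i maps to bit i and smaller values are more urgent:
        //
        //   #include <bit>
        //   #include <cstdint>
        //
        //   // Returns the most urgent set priority, or 64 when the bitmap is empty.
        //   constexpr int HighestSetPriority(uint64_t bitmap) {
        //       return std::countr_zero(bitmap);
        //   }
        //
        //   static_assert(HighestSetPriority((1ULL << 12) | (1ULL << 40)) == 12);
        //   static_assert(HighestSetPriority(0) == 64); // the "no thread" case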

        constexpr Member* GetFront(s32 priority, s32 core) const {
            ASSERT(IsValidCore(core));
            ASSERT(IsValidPriority(priority));

            if (priority <= LowestPriority) {
                return this->queues[priority].GetFront(core);
            } else {
                return nullptr;
            }
        }

        constexpr Member* GetNext(s32 core, const Member* member) const {
            ASSERT(IsValidCore(core));

            Member* next = member->GetPriorityQueueEntry(core).GetNext();
            if (next == nullptr) {
                const s32 priority = static_cast<s32>(
                    this->available_priorities[core].GetNextSet(member->GetPriority()));
                if (priority <= LowestPriority) {
                    next = this->queues[priority].GetFront(core);
                }
            }
            return next;
        }

        constexpr void MoveToFront(s32 priority, s32 core, Member* member) {
            ASSERT(IsValidCore(core));
            ASSERT(IsValidPriority(priority));

            if (priority <= LowestPriority) {
                this->queues[priority].Remove(core, member);
                this->queues[priority].PushFront(core, member);
            }
        }

        constexpr Member* MoveToBack(s32 priority, s32 core, Member* member) {
            ASSERT(IsValidCore(core));
            ASSERT(IsValidPriority(priority));

            if (priority <= LowestPriority) {
                this->queues[priority].Remove(core, member);
                this->queues[priority].PushBack(core, member);
                return this->queues[priority].GetFront(core);
            } else {
                return nullptr;
            }
        }

    private:
        std::array<KPerCoreQueue, NumPriority> queues{};
        std::array<Common::BitSet64<NumPriority>, NumCores> available_priorities{};
    };

private:
    KPriorityQueueImpl scheduled_queue;
    KPriorityQueueImpl suggested_queue;

private:
    constexpr void ClearAffinityBit(u64& affinity, s32 core) {
        affinity &= ~(u64(1) << core);
    }

    constexpr s32 GetNextCore(u64& affinity) {
        const s32 core = Common::CountTrailingZeroes64(affinity);
        ClearAffinityBit(affinity, core);
        return core;
    }
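
    // A sketch (not part of this commit): draining an affinity mask with the two
    // helpers above visits each permitted core exactly once, lowest index first.
    // C++20 std::countr_zero plays the role of Common::CountTrailingZeroes64, and
    // visit() is an illustrative placeholder.
    //
    //   uint64_t affinity = 0b1011; // cores 0, 1 and 3
    //   while (affinity != 0) {
    //       const int core = std::countr_zero(affinity); // GetNextCore's lookup
    //       affinity &= ~(1ULL << core);                 // ClearAffinityBit
    //       visit(core);                                 // visits 0, then 1, then 3
    //   }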

    constexpr void PushBack(s32 priority, Member* member) {
        ASSERT(IsValidPriority(priority));

        // Push onto the scheduled queue for its core, if we can.
        u64 affinity = member->GetAffinityMask().GetAffinityMask();
        if (const s32 core = member->GetActiveCore(); core >= 0) {
            this->scheduled_queue.PushBack(priority, core, member);
            ClearAffinityBit(affinity, core);
        }

        // And suggest the thread for all other cores.
        while (affinity) {
            this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
        }
    }

    constexpr void PushFront(s32 priority, Member* member) {
        ASSERT(IsValidPriority(priority));

        // Push onto the scheduled queue for its core, if we can.
        u64 affinity = member->GetAffinityMask().GetAffinityMask();
        if (const s32 core = member->GetActiveCore(); core >= 0) {
            this->scheduled_queue.PushFront(priority, core, member);
            ClearAffinityBit(affinity, core);
        }

        // And suggest the thread for all other cores.
        // Note: Nintendo pushes onto the back of the suggested queue, not the front.
        while (affinity) {
            this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
        }
    }

    constexpr void Remove(s32 priority, Member* member) {
        ASSERT(IsValidPriority(priority));

        // Remove from the scheduled queue for its core.
        u64 affinity = member->GetAffinityMask().GetAffinityMask();
        if (const s32 core = member->GetActiveCore(); core >= 0) {
            this->scheduled_queue.Remove(priority, core, member);
            ClearAffinityBit(affinity, core);
        }

        // Remove from the suggested queue for all other cores.
        while (affinity) {
            this->suggested_queue.Remove(priority, GetNextCore(affinity), member);
        }
    }

public:
    constexpr KPriorityQueue() = default;

    // Getters.
    constexpr Member* GetScheduledFront(s32 core) const {
        return this->scheduled_queue.GetFront(core);
    }

    constexpr Member* GetScheduledFront(s32 core, s32 priority) const {
        return this->scheduled_queue.GetFront(priority, core);
    }

    constexpr Member* GetSuggestedFront(s32 core) const {
        return this->suggested_queue.GetFront(core);
    }

    constexpr Member* GetSuggestedFront(s32 core, s32 priority) const {
        return this->suggested_queue.GetFront(priority, core);
    }

    constexpr Member* GetScheduledNext(s32 core, const Member* member) const {
        return this->scheduled_queue.GetNext(core, member);
    }

    constexpr Member* GetSuggestedNext(s32 core, const Member* member) const {
        return this->suggested_queue.GetNext(core, member);
    }

    constexpr Member* GetSamePriorityNext(s32 core, const Member* member) const {
        return member->GetPriorityQueueEntry(core).GetNext();
    }

    // Mutators.
    constexpr void PushBack(Member* member) {
        this->PushBack(member->GetPriority(), member);
    }

    constexpr void Remove(Member* member) {
        this->Remove(member->GetPriority(), member);
    }

    constexpr void MoveToScheduledFront(Member* member) {
        this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
    }

    constexpr Thread* MoveToScheduledBack(Member* member) {
        return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
                                                member);
    }

    // First class fancy operations.
    constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) {
        ASSERT(IsValidPriority(prev_priority));

        // Remove the member from the queues.
        const s32 new_priority = member->GetPriority();
        this->Remove(prev_priority, member);

        // And enqueue. If the member is running, we want to keep it running.
        if (is_running) {
            this->PushFront(new_priority, member);
        } else {
            this->PushBack(new_priority, member);
        }
    }

    constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity,
                                      Member* member) {
        // Get the new information.
        const s32 priority = member->GetPriority();
        const AffinityMaskType& new_affinity = member->GetAffinityMask();
        const s32 new_core = member->GetActiveCore();

        // Remove the member from all queues it was in before.
        for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
            if (prev_affinity.GetAffinity(core)) {
                if (core == prev_core) {
                    this->scheduled_queue.Remove(priority, core, member);
                } else {
                    this->suggested_queue.Remove(priority, core, member);
                }
            }
        }

        // And add the member to all queues it should be in now.
        for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
            if (new_affinity.GetAffinity(core)) {
                if (core == new_core) {
                    this->scheduled_queue.PushBack(priority, core, member);
                } else {
                    this->suggested_queue.PushBack(priority, core, member);
                }
            }
        }
    }

    constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) {
        // Get the new information.
        const s32 new_core = member->GetActiveCore();
        const s32 priority = member->GetPriority();

        // We don't need to do anything if the core is the same.
        if (prev_core != new_core) {
            // Remove from the scheduled queue for the previous core.
            if (prev_core >= 0) {
                this->scheduled_queue.Remove(priority, prev_core, member);
            }

            // Remove from the suggested queue and add to the scheduled queue for the new core.
            if (new_core >= 0) {
                this->suggested_queue.Remove(priority, new_core, member);
                if (to_front) {
                    this->scheduled_queue.PushFront(priority, new_core, member);
                } else {
                    this->scheduled_queue.PushBack(priority, new_core, member);
                }
            }

            // Add to the suggested queue for the previous core.
            if (prev_core >= 0) {
                this->suggested_queue.PushBack(priority, prev_core, member);
            }
        }
    }
};

} // namespace Kernel
784
src/core/hle/kernel/k_scheduler.cpp
Executable file
@@ -0,0 +1,784 @@
// Copyright 2020 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
// This file references various implementation details from Atmosphere, an open-source firmware for
|
||||
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/bit_util.h"
|
||||
#include "common/fiber.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/arm/arm_interface.h"
|
||||
#include "core/core.h"
|
||||
#include "core/core_timing.h"
|
||||
#include "core/cpu_manager.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/physical_core.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
#include "core/hle/kernel/time_manager.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
static void IncrementScheduledCount(Kernel::Thread* thread) {
|
||||
if (auto process = thread->GetOwnerProcess(); process) {
|
||||
process->IncrementScheduledCount();
|
||||
}
|
||||
}
|
||||
|
||||
void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
|
||||
Core::EmuThreadHandle global_thread) {
|
||||
u32 current_core = global_thread.host_handle;
|
||||
bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
|
||||
(current_core < Core::Hardware::NUM_CPU_CORES);
|
||||
|
||||
while (cores_pending_reschedule != 0) {
|
||||
u32 core = Common::CountTrailingZeroes64(cores_pending_reschedule);
|
||||
ASSERT(core < Core::Hardware::NUM_CPU_CORES);
|
||||
if (!must_context_switch || core != current_core) {
|
||||
auto& phys_core = kernel.PhysicalCore(core);
|
||||
phys_core.Interrupt();
|
||||
} else {
|
||||
must_context_switch = true;
|
||||
}
|
||||
cores_pending_reschedule &= ~(1ULL << core);
|
||||
}
|
||||
if (must_context_switch) {
|
||||
auto core_scheduler = kernel.CurrentScheduler();
|
||||
kernel.ExitSVCProfile();
|
||||
core_scheduler->RescheduleCurrentCore();
|
||||
kernel.EnterSVCProfile();
|
||||
}
|
||||
}
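RescheduleCores consumes the pending-core bitmask one set bit at a time and interrupts each core it names. The same iteration idiom as a standalone C++20 sketch (std::countr_zero plays the role of Common::CountTrailingZeroes64 here):

#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
    std::uint64_t pending = 0b1010'0100; // cores 2, 5 and 7 need rescheduling
    while (pending != 0) {
        const int core = std::countr_zero(pending); // index of the lowest set bit
        std::printf("interrupt core %d\n", core);
        pending &= pending - 1; // clear the lowest set bit
    }
}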
u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
    std::scoped_lock lock{guard};
    if (Thread* prev_highest_thread = this->state.highest_priority_thread;
        prev_highest_thread != highest_thread) {
        if (prev_highest_thread != nullptr) {
            IncrementScheduledCount(prev_highest_thread);
            prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
        }
        if (this->state.should_count_idle) {
            if (highest_thread != nullptr) {
                // if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
                //     process->SetRunningThread(this->core_id, highest_thread,
                //                               this->state.idle_count);
                // }
            } else {
                this->state.idle_count++;
            }
        }

        this->state.highest_priority_thread = highest_thread;
        this->state.needs_scheduling = true;
        return (1ULL << this->core_id);
    } else {
        return 0;
    }
}

u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Clear that we need to update.
    ClearSchedulerUpdateNeeded(kernel);

    u64 cores_needing_scheduling = 0, idle_cores = 0;
    Thread* top_threads[Core::Hardware::NUM_CPU_CORES];
    auto& priority_queue = GetPriorityQueue(kernel);

    // We want to go over all cores, finding the highest priority thread and determining if
    // scheduling is needed for that core.
    for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
        Thread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
        if (top_thread != nullptr) {
            // If the thread has no waiters, we need to check if the process has a thread pinned.
            // TODO(bunnei): Implement thread pinning
        } else {
            idle_cores |= (1ULL << core_id);
        }

        top_threads[core_id] = top_thread;
        cores_needing_scheduling |=
            kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
    }

    // Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
    while (idle_cores != 0) {
        u32 core_id = Common::CountTrailingZeroes64(idle_cores);
        if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
            s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
            size_t num_candidates = 0;

            // While we have a suggested thread, try to migrate it!
            while (suggested != nullptr) {
                // Check if the suggested thread is the top thread on its core.
                const s32 suggested_core = suggested->GetActiveCore();
                if (Thread* top_thread =
                        (suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
                    top_thread != suggested) {
                    // Make sure we're not dealing with threads too high priority for migration.
                    if (top_thread != nullptr &&
                        top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) {
                        break;
                    }

                    // The suggested thread isn't bound to its core, so we can migrate it!
                    suggested->SetActiveCore(core_id);
                    priority_queue.ChangeCore(suggested_core, suggested);

                    top_threads[core_id] = suggested;
                    cores_needing_scheduling |=
                        kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
                    break;
                }

                // Note this core as a candidate for migration.
                ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES);
                migration_candidates[num_candidates++] = suggested_core;
                suggested = priority_queue.GetSuggestedNext(core_id, suggested);
            }

            // If suggested is nullptr, we failed to migrate a specific thread. So let's try all our
            // candidate cores' top threads.
            if (suggested == nullptr) {
                for (size_t i = 0; i < num_candidates; i++) {
                    // Check if there's some other thread that can run on the candidate core.
                    const s32 candidate_core = migration_candidates[i];
                    suggested = top_threads[candidate_core];
                    if (Thread* next_on_candidate_core =
                            priority_queue.GetScheduledNext(candidate_core, suggested);
                        next_on_candidate_core != nullptr) {
                        // The candidate core can run some other thread! We'll migrate its current
                        // top thread to us.
                        top_threads[candidate_core] = next_on_candidate_core;
                        cores_needing_scheduling |=
                            kernel.Scheduler(candidate_core)
                                .UpdateHighestPriorityThread(top_threads[candidate_core]);

                        // Perform the migration.
                        suggested->SetActiveCore(core_id);
                        priority_queue.ChangeCore(candidate_core, suggested);

                        top_threads[core_id] = suggested;
                        cores_needing_scheduling |=
                            kernel.Scheduler(core_id).UpdateHighestPriorityThread(
                                top_threads[core_id]);
                        break;
                    }
                }
            }
        }

        idle_cores &= ~(1ULL << core_id);
    }

    return cores_needing_scheduling;
}
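The idle-core loop is two-phase: first it tries to take a suggested thread outright; only if every suggestion is currently the top thread of its own core does it free one of those candidate cores by promoting that core's second thread. A compact sketch of phase two's precondition (illustrative model with plain vectors, not the real queue API):

#include <optional>
#include <vector>

using Queue = std::vector<int>; // hypothetical per-core list of runnable thread ids

// Steal the candidate core's top thread for an idle core, but only when the
// candidate has a second thread to fall back on; otherwise the idleness would
// simply move from one core to another.
std::optional<int> StealFromCandidate(Queue& candidate) {
    if (candidate.size() < 2) {
        return std::nullopt;
    }
    const int stolen = candidate.front();
    candidate.erase(candidate.begin()); // the candidate now runs its second thread
    return stolen;
}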
void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Check if the state has changed, because if it hasn't there's nothing to do.
    const auto cur_state = thread->scheduling_state;
    if (cur_state == old_state) {
        return;
    }

    // Update the priority queues.
    if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
        // If we were previously runnable, then we're not runnable now, and we should remove.
        GetPriorityQueue(kernel).Remove(thread);
        IncrementScheduledCount(thread);
        SetSchedulerUpdateNeeded(kernel);
    } else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
        // If we're now runnable, then we weren't previously, and we should add.
        GetPriorityQueue(kernel).PushBack(thread);
        IncrementScheduledCount(thread);
        SetSchedulerUpdateNeeded(kernel);
    }
}

void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
                                         u32 old_priority) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // If the thread is runnable, we want to change its priority in the queue.
    if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
        GetPriorityQueue(kernel).ChangePriority(
            old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
        IncrementScheduledCount(thread);
        SetSchedulerUpdateNeeded(kernel);
    }
}

void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
                                             const KAffinityMask& old_affinity, s32 old_core) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // If the thread is runnable, we want to change its affinity in the queue.
    if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
        GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
        IncrementScheduledCount(thread);
        SetSchedulerUpdateNeeded(kernel);
    }
}
void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
    ASSERT(system.GlobalSchedulerContext().IsLocked());

    // Get a reference to the priority queue.
    auto& kernel = system.Kernel();
    auto& priority_queue = GetPriorityQueue(kernel);

    // Rotate the front of the queue to the end.
    Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
    Thread* next_thread = nullptr;
    if (top_thread != nullptr) {
        next_thread = priority_queue.MoveToScheduledBack(top_thread);
        if (next_thread != top_thread) {
            IncrementScheduledCount(top_thread);
            IncrementScheduledCount(next_thread);
        }
    }

    // While we have a suggested thread, try to migrate it!
    {
        Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
        while (suggested != nullptr) {
            // Check if the suggested thread is the top thread on its core.
            const s32 suggested_core = suggested->GetActiveCore();
            if (Thread* top_on_suggested_core =
                    (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                          : nullptr;
                top_on_suggested_core != suggested) {
                // If the next thread is a new thread that has been waiting longer than our
                // suggestion, we prefer it to our suggestion.
                if (top_thread != next_thread && next_thread != nullptr &&
                    next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
                    suggested = nullptr;
                    break;
                }

                // If we're allowed to do a migration, do one.
                // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion
                // to the front of the queue.
                if (top_on_suggested_core == nullptr ||
                    top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
                    suggested->SetActiveCore(core_id);
                    priority_queue.ChangeCore(suggested_core, suggested, true);
                    IncrementScheduledCount(suggested);
                    break;
                }
            }

            // Get the next suggestion.
            suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
        }
    }

    // Now that we might have migrated a thread with the same priority, check if we can do better.
    {
        Thread* best_thread = priority_queue.GetScheduledFront(core_id);
        if (best_thread == GetCurrentThread()) {
            best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
        }

        // If the best thread we can choose has a priority the same or worse than ours, try to
        // migrate a higher priority thread.
        if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) {
            Thread* suggested = priority_queue.GetSuggestedFront(core_id);
            while (suggested != nullptr) {
                // If the suggestion's priority is the same as ours, don't bother.
                if (suggested->GetPriority() >= best_thread->GetPriority()) {
                    break;
                }

                // Check if the suggested thread is the top thread on its core.
                const s32 suggested_core = suggested->GetActiveCore();
                if (Thread* top_on_suggested_core =
                        (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                              : nullptr;
                    top_on_suggested_core != suggested) {
                    // If we're allowed to do a migration, do one.
                    // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
                    // suggestion to the front of the queue.
                    if (top_on_suggested_core == nullptr ||
                        top_on_suggested_core->GetPriority() >=
                            HighestCoreMigrationAllowedPriority) {
                        suggested->SetActiveCore(core_id);
                        priority_queue.ChangeCore(suggested_core, suggested, true);
                        IncrementScheduledCount(suggested);
                        break;
                    }
                }

                // Get the next suggestion.
                suggested = priority_queue.GetSuggestedNext(core_id, suggested);
            }
        }
    }

    // After a rotation, we need a scheduler update.
    SetSchedulerUpdateNeeded(kernel);
}
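The first block above is plain round-robin within a single priority band: the front thread moves to the back and the former second thread becomes the new front. The equivalent operation on a bare std::deque, as a sketch (assumes a non-empty queue):

#include <deque>
#include <utility>

// Rotate the front element to the back and return the new front; this is what
// MoveToScheduledBack accomplishes for one (core, priority) scheduled queue.
template <typename T>
T RotateFrontToBack(std::deque<T>& queue) {
    T front = std::move(queue.front());
    queue.pop_front();
    queue.push_back(std::move(front));
    return queue.front();
}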
bool KScheduler::CanSchedule(KernelCore& kernel) {
    return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1;
}

bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
    return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire);
}

void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) {
    kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release);
}

void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
    kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release);
}

void KScheduler::DisableScheduling(KernelCore& kernel) {
    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
        scheduler->GetCurrentThread()->DisableDispatch();
    }
}

void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
                                  Core::EmuThreadHandle global_thread) {
    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
        scheduler->GetCurrentThread()->EnableDispatch();
    }
    RescheduleCores(kernel, cores_needing_scheduling, global_thread);
}

u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
    if (IsSchedulerUpdateNeeded(kernel)) {
        return UpdateHighestPriorityThreadsImpl(kernel);
    } else {
        return 0;
    }
}

KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
    return kernel.GlobalSchedulerContext().priority_queue;
}

void KScheduler::YieldWithoutCoreMigration() {
    auto& kernel = system.Kernel();

    // Validate preconditions.
    ASSERT(CanSchedule(kernel));
    ASSERT(kernel.CurrentProcess() != nullptr);

    // Get the current thread and process.
    Thread& cur_thread = *GetCurrentThread();
    Process& cur_process = *kernel.CurrentProcess();

    // If the thread's yield count matches, there's nothing for us to do.
    if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
        return;
    }

    // Get a reference to the priority queue.
    auto& priority_queue = GetPriorityQueue(kernel);

    // Perform the yield.
    {
        KScopedSchedulerLock lock(kernel);

        const auto cur_state = cur_thread.scheduling_state;
        if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
            // Put the current thread at the back of the queue.
            Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
            IncrementScheduledCount(std::addressof(cur_thread));

            // If the next thread is different, we have an update to perform.
            if (next_thread != std::addressof(cur_thread)) {
                SetSchedulerUpdateNeeded(kernel);
            } else {
                // Otherwise, set the thread's yield count so that we won't waste work until the
                // process is scheduled again.
                cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
            }
        }
    }
}
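The early-out at the top of each yield relies on a counter handshake: the process counts every scheduling event, and a thread records that count whenever its yield turned out to be a no-op; until the process is scheduled again, repeating the yield would be wasted work. A minimal sketch of the handshake (hypothetical field names, illustration only):

#include <cstdint>

struct ProcessCounters {
    std::uint64_t scheduled_count = 0; // bumped on every scheduling event
};

struct ThreadCounters {
    std::uint64_t yield_schedule_count = ~std::uint64_t{0}; // recorded on a no-op yield
};

// True when the thread already yielded since the process was last scheduled,
// so the yield can be skipped entirely.
bool YieldIsRedundant(const ThreadCounters& t, const ProcessCounters& p) {
    return t.yield_schedule_count == p.scheduled_count;
}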
void KScheduler::YieldWithCoreMigration() {
    auto& kernel = system.Kernel();

    // Validate preconditions.
    ASSERT(CanSchedule(kernel));
    ASSERT(kernel.CurrentProcess() != nullptr);

    // Get the current thread and process.
    Thread& cur_thread = *GetCurrentThread();
    Process& cur_process = *kernel.CurrentProcess();

    // If the thread's yield count matches, there's nothing for us to do.
    if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
        return;
    }

    // Get a reference to the priority queue.
    auto& priority_queue = GetPriorityQueue(kernel);

    // Perform the yield.
    {
        KScopedSchedulerLock lock(kernel);

        const auto cur_state = cur_thread.scheduling_state;
        if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
            // Get the current active core.
            const s32 core_id = cur_thread.GetActiveCore();

            // Put the current thread at the back of the queue.
            Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
            IncrementScheduledCount(std::addressof(cur_thread));

            // While we have a suggested thread, try to migrate it!
            bool recheck = false;
            Thread* suggested = priority_queue.GetSuggestedFront(core_id);
            while (suggested != nullptr) {
                // Check if the suggested thread is the thread running on its core.
                const s32 suggested_core = suggested->GetActiveCore();

                if (Thread* running_on_suggested_core =
                        (suggested_core >= 0)
                            ? kernel.Scheduler(suggested_core).state.highest_priority_thread
                            : nullptr;
                    running_on_suggested_core != suggested) {
                    // If the current thread's priority is higher than our suggestion's, we prefer
                    // the next thread to the suggestion. We also prefer the next thread when the
                    // current thread's priority is equal to the suggestion's, but the next thread
                    // has been waiting longer.
                    if ((suggested->GetPriority() > cur_thread.GetPriority()) ||
                        (suggested->GetPriority() == cur_thread.GetPriority() &&
                         next_thread != std::addressof(cur_thread) &&
                         next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick())) {
                        suggested = nullptr;
                        break;
                    }

                    // If we're allowed to do a migration, do one.
                    // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
                    // suggestion to the front of the queue.
                    if (running_on_suggested_core == nullptr ||
                        running_on_suggested_core->GetPriority() >=
                            HighestCoreMigrationAllowedPriority) {
                        suggested->SetActiveCore(core_id);
                        priority_queue.ChangeCore(suggested_core, suggested, true);
                        IncrementScheduledCount(suggested);
                        break;
                    } else {
                        // We couldn't perform a migration, but we should check again on a future
                        // yield.
                        recheck = true;
                    }
                }

                // Get the next suggestion.
                suggested = priority_queue.GetSuggestedNext(core_id, suggested);
            }

            // If we still have a suggestion or the next thread is different, we have an update to
            // perform.
            if (suggested != nullptr || next_thread != std::addressof(cur_thread)) {
                SetSchedulerUpdateNeeded(kernel);
            } else if (!recheck) {
                // Otherwise if we don't need to re-check, set the thread's yield count so that we
                // won't waste work until the process is scheduled again.
                cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
            }
        }
    }
}

void KScheduler::YieldToAnyThread() {
    auto& kernel = system.Kernel();

    // Validate preconditions.
    ASSERT(CanSchedule(kernel));
    ASSERT(kernel.CurrentProcess() != nullptr);

    // Get the current thread and process.
    Thread& cur_thread = *GetCurrentThread();
    Process& cur_process = *kernel.CurrentProcess();

    // If the thread's yield count matches, there's nothing for us to do.
    if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
        return;
    }

    // Get a reference to the priority queue.
    auto& priority_queue = GetPriorityQueue(kernel);

    // Perform the yield.
    {
        KScopedSchedulerLock lock(kernel);

        const auto cur_state = cur_thread.scheduling_state;
        if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
            // Get the current active core.
            const s32 core_id = cur_thread.GetActiveCore();

            // Migrate the current thread to core -1.
            cur_thread.SetActiveCore(-1);
            priority_queue.ChangeCore(core_id, std::addressof(cur_thread));
            IncrementScheduledCount(std::addressof(cur_thread));

            // If there's nothing scheduled, we can try to perform a migration.
            if (priority_queue.GetScheduledFront(core_id) == nullptr) {
                // While we have a suggested thread, try to migrate it!
                Thread* suggested = priority_queue.GetSuggestedFront(core_id);
                while (suggested != nullptr) {
                    // Check if the suggested thread is the top thread on its core.
                    const s32 suggested_core = suggested->GetActiveCore();
                    if (Thread* top_on_suggested_core =
                            (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                                  : nullptr;
                        top_on_suggested_core != suggested) {
                        // If we're allowed to do a migration, do one.
                        if (top_on_suggested_core == nullptr ||
                            top_on_suggested_core->GetPriority() >=
                                HighestCoreMigrationAllowedPriority) {
                            suggested->SetActiveCore(core_id);
                            priority_queue.ChangeCore(suggested_core, suggested);
                            IncrementScheduledCount(suggested);
                        }

                        // Regardless of whether we migrated, we had a candidate, so we're done.
                        break;
                    }

                    // Get the next suggestion.
                    suggested = priority_queue.GetSuggestedNext(core_id, suggested);
                }

                // If the suggestion is different from the current thread, we need to perform an
                // update.
                if (suggested != std::addressof(cur_thread)) {
                    SetSchedulerUpdateNeeded(kernel);
                } else {
                    // Otherwise, set the thread's yield count so that we won't waste work until the
                    // process is scheduled again.
                    cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
                }
            } else {
                // Otherwise, we have an update to perform.
                SetSchedulerUpdateNeeded(kernel);
            }
        }
    }
}
KScheduler::KScheduler(Core::System& system, std::size_t core_id)
    : system(system), core_id(core_id) {
    switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
    this->state.needs_scheduling = true;
    this->state.interrupt_task_thread_runnable = false;
    this->state.should_count_idle = false;
    this->state.idle_count = 0;
    this->state.idle_thread_stack = nullptr;
    this->state.highest_priority_thread = nullptr;
}

KScheduler::~KScheduler() = default;

Thread* KScheduler::GetCurrentThread() const {
    if (current_thread) {
        return current_thread;
    }
    return idle_thread;
}

u64 KScheduler::GetLastContextSwitchTicks() const {
    return last_context_switch_time;
}

void KScheduler::RescheduleCurrentCore() {
    ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);

    auto& phys_core = system.Kernel().PhysicalCore(core_id);
    if (phys_core.IsInterrupted()) {
        phys_core.ClearInterrupt();
    }
    guard.lock();
    if (this->state.needs_scheduling) {
        Schedule();
    } else {
        guard.unlock();
    }
}

void KScheduler::OnThreadStart() {
    SwitchContextStep2();
}

void KScheduler::Unload(Thread* thread) {
    if (thread) {
        thread->SetIsRunning(false);
        if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
            system.ArmInterface(core_id).ExceptionalExit();
            thread->SetContinuousOnSVC(false);
        }
        if (!thread->IsHLEThread() && !thread->HasExited()) {
            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
            cpu_core.SaveContext(thread->GetContext32());
            cpu_core.SaveContext(thread->GetContext64());
            // Save the TPIDR_EL0 system register in case it was modified.
            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
            cpu_core.ClearExclusiveState();
        }
        thread->context_guard.unlock();
    }
}

void KScheduler::Reload(Thread* thread) {
    if (thread) {
        ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
                   "Thread must be runnable.");

        // Cancel any outstanding wakeup events for this thread
        thread->SetIsRunning(true);
        thread->SetWasRunning(false);

        auto* const thread_owner_process = thread->GetOwnerProcess();
        if (thread_owner_process != nullptr) {
            system.Kernel().MakeCurrentProcess(thread_owner_process);
        }
        if (!thread->IsHLEThread()) {
            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
            cpu_core.LoadContext(thread->GetContext32());
            cpu_core.LoadContext(thread->GetContext64());
            cpu_core.SetTlsAddress(thread->GetTLSAddress());
            cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
            cpu_core.ClearExclusiveState();
        }
    }
}
void KScheduler::SwitchContextStep2() {
    // Load context of new thread
    Reload(current_thread);

    RescheduleCurrentCore();
}

void KScheduler::ScheduleImpl() {
    Thread* previous_thread = current_thread;
    current_thread = state.highest_priority_thread;

    this->state.needs_scheduling = false;

    if (current_thread == previous_thread) {
        guard.unlock();
        return;
    }

    Process* const previous_process = system.Kernel().CurrentProcess();

    UpdateLastContextSwitchTime(previous_thread, previous_process);

    // Save context for previous thread
    Unload(previous_thread);

    std::shared_ptr<Common::Fiber>* old_context;
    if (previous_thread != nullptr) {
        old_context = &previous_thread->GetHostContext();
    } else {
        old_context = &idle_thread->GetHostContext();
    }
    guard.unlock();

    Common::Fiber::YieldTo(*old_context, switch_fiber);
    // When a thread wakes up, the scheduler may have changed to a different one on another core.
    auto& next_scheduler = *system.Kernel().CurrentScheduler();
    next_scheduler.SwitchContextStep2();
}
void KScheduler::OnSwitch(void* this_scheduler) {
    KScheduler* sched = static_cast<KScheduler*>(this_scheduler);
    sched->SwitchToCurrent();
}

void KScheduler::SwitchToCurrent() {
    while (true) {
        {
            std::scoped_lock lock{guard};
            current_thread = state.highest_priority_thread;
            this->state.needs_scheduling = false;
        }
        const auto is_switch_pending = [this] {
            std::scoped_lock lock{guard};
            return state.needs_scheduling.load(std::memory_order_relaxed);
        };
        do {
            if (current_thread != nullptr && !current_thread->IsHLEThread()) {
                current_thread->context_guard.lock();
                if (!current_thread->IsRunnable()) {
                    current_thread->context_guard.unlock();
                    break;
                }
                if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
                    current_thread->context_guard.unlock();
                    break;
                }
            }
            std::shared_ptr<Common::Fiber>* next_context;
            if (current_thread != nullptr) {
                next_context = &current_thread->GetHostContext();
            } else {
                next_context = &idle_thread->GetHostContext();
            }
            Common::Fiber::YieldTo(switch_fiber, *next_context);
        } while (!is_switch_pending());
    }
}
void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
    const u64 prev_switch_ticks = last_context_switch_time;
    const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
    const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;

    if (thread != nullptr) {
        thread->UpdateCPUTimeTicks(update_ticks);
    }

    if (process != nullptr) {
        process->UpdateCPUTimeTicks(update_ticks);
    }

    last_context_switch_time = most_recent_switch_ticks;
}
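As a worked example of the accounting above (assumed tick values, purely illustrative): if the previous switch was recorded at tick 1000 and the current CPU tick is 1750, then update_ticks is 750; those 750 ticks are charged to both the outgoing thread and its process, and 1750 becomes the new last_context_switch_time baseline.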
void KScheduler::Initialize() {
    std::string name = "Idle Thread Id:" + std::to_string(core_id);
    std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
                                     nullptr, std::move(init_func), init_func_parameter);
    idle_thread = thread_res.Unwrap().get();

    {
        KScopedSchedulerLock lock{system.Kernel()};
        idle_thread->SetStatus(ThreadStatus::Ready);
    }
}

KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
    : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {}

KScopedSchedulerLock::~KScopedSchedulerLock() = default;

} // namespace Kernel
201
src/core/hle/kernel/k_scheduler.h
Executable file
@@ -0,0 +1,201 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include <atomic>

#include "common/common_types.h"
#include "common/spin_lock.h"
#include "core/hle/kernel/global_scheduler_context.h"
#include "core/hle/kernel/k_priority_queue.h"
#include "core/hle/kernel/k_scheduler_lock.h"
#include "core/hle/kernel/k_scoped_lock.h"

namespace Common {
class Fiber;
}

namespace Core {
class System;
}

namespace Kernel {

class KernelCore;
class Process;
class SchedulerLock;
class Thread;

class KScheduler final {
public:
    explicit KScheduler(Core::System& system, std::size_t core_id);
    ~KScheduler();

    /// Reschedules to the next available thread (call after current thread is suspended)
    void RescheduleCurrentCore();

    /// Reschedules cores pending reschedule, to be called on EnableScheduling.
    static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
                                Core::EmuThreadHandle global_thread);

    /// The next two are for SingleCore Only.
    /// Unload current thread before preempting core.
    void Unload(Thread* thread);

    /// Reload current thread after core preemption.
    void Reload(Thread* thread);

    /// Gets the current running thread
    [[nodiscard]] Thread* GetCurrentThread() const;

    /// Gets the timestamp for the last context switch in ticks.
    [[nodiscard]] u64 GetLastContextSwitchTicks() const;

    [[nodiscard]] bool ContextSwitchPending() const {
        return state.needs_scheduling.load(std::memory_order_relaxed);
    }

    void Initialize();

    void OnThreadStart();

    [[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() {
        return switch_fiber;
    }

    [[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const {
        return switch_fiber;
    }

    [[nodiscard]] u64 UpdateHighestPriorityThread(Thread* highest_thread);

    /**
     * Takes a thread and moves it to the back of its priority list.
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
    void YieldWithoutCoreMigration();

    /**
     * Takes a thread and moves it to the back of its priority list.
     * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
     * a better priority than the next thread in the core.
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
    void YieldWithCoreMigration();

    /**
     * Takes a thread and moves it out of the scheduling queue and into the suggested queue. If no
     * thread can be scheduled afterwards on that core, a suggested thread is obtained instead.
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
    void YieldToAnyThread();

    /// Notify the scheduler a thread's status has changed.
    static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state);

    /// Notify the scheduler a thread's priority has changed.
    static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
                                        u32 old_priority);

    /// Notify the scheduler a thread's core and/or affinity mask has changed.
    static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
                                            const KAffinityMask& old_affinity, s32 old_core);

    static bool CanSchedule(KernelCore& kernel);
    static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
    static void SetSchedulerUpdateNeeded(KernelCore& kernel);
    static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
    static void DisableScheduling(KernelCore& kernel);
    static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
                                 Core::EmuThreadHandle global_thread);
    [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);

private:
    friend class GlobalSchedulerContext;

    /**
     * Takes care of selecting the new scheduled threads in three steps:
     *
     * 1. First, a thread is selected from the top of the priority queue. If no thread
     *    is obtained then we move to step two, else we are done.
     *
     * 2. Second, we try to get a suggested thread that's not assigned to any core or
     *    that is not the top thread in that core.
     *
     * 3. Third, if no suggested thread is found, we do a second pass and pick a running
     *    thread in another core and swap it with its current thread.
     *
     * Returns the cores needing scheduling.
     */
    [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);

    [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);

    void RotateScheduledQueue(s32 core_id, s32 priority);

    void Schedule() {
        ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
        this->ScheduleImpl();
    }

    /// Switches the CPU's active thread context to that of the specified thread
    void ScheduleImpl();

    /// When a thread wakes up, it must run this through its new scheduler
    void SwitchContextStep2();

    /**
     * Called on every context switch to update the internal timestamp.
     * This also updates the running time ticks for the given thread and
     * process using the following difference:
     *
     * ticks += most_recent_ticks - last_context_switch_ticks
     *
     * The internal tick timestamp for the scheduler is simply the
     * most recent tick count retrieved. No special arithmetic is
     * applied to it.
     */
    void UpdateLastContextSwitchTime(Thread* thread, Process* process);

    static void OnSwitch(void* this_scheduler);
    void SwitchToCurrent();

    Thread* current_thread{};
    Thread* idle_thread{};

    std::shared_ptr<Common::Fiber> switch_fiber{};

    struct SchedulingState {
        std::atomic<bool> needs_scheduling;
        bool interrupt_task_thread_runnable{};
        bool should_count_idle{};
        u64 idle_count{};
        Thread* highest_priority_thread{};
        void* idle_thread_stack{};
    };

    SchedulingState state;

    Core::System& system;
    u64 last_context_switch_time{};
    const std::size_t core_id;

    Common::SpinLock guard{};
};

class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
public:
    explicit KScopedSchedulerLock(KernelCore& kernel);
    ~KScopedSchedulerLock();
};

} // namespace Kernel
74
src/core/hle/kernel/k_scheduler_lock.h
Executable file
@@ -0,0 +1,74 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/assert.h"
#include "common/spin_lock.h"
#include "core/hardware_properties.h"

namespace Kernel {

class KernelCore;

template <typename SchedulerType>
class KAbstractSchedulerLock {
public:
    explicit KAbstractSchedulerLock(KernelCore& kernel) : kernel{kernel} {}

    bool IsLockedByCurrentThread() const {
        return this->owner_thread == kernel.GetCurrentEmuThreadID();
    }

    void Lock() {
        if (this->IsLockedByCurrentThread()) {
            // If we already own the lock, we can just increment the count.
            ASSERT(this->lock_count > 0);
            this->lock_count++;
        } else {
            // Otherwise, we want to disable scheduling and acquire the spinlock.
            SchedulerType::DisableScheduling(kernel);
            this->spin_lock.lock();

            // For debug, ensure that our state is valid.
            ASSERT(this->lock_count == 0);
            ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle());

            // Increment count, take ownership.
            this->lock_count = 1;
            this->owner_thread = kernel.GetCurrentEmuThreadID();
        }
    }

    void Unlock() {
        ASSERT(this->IsLockedByCurrentThread());
        ASSERT(this->lock_count > 0);

        // Release an instance of the lock.
        if ((--this->lock_count) == 0) {
            // We're no longer going to hold the lock. Take note of what cores need scheduling.
            const u64 cores_needing_scheduling =
                SchedulerType::UpdateHighestPriorityThreads(kernel);
            Core::EmuThreadHandle leaving_thread = owner_thread;

            // Note that we no longer hold the lock, and unlock the spinlock.
            this->owner_thread = Core::EmuThreadHandle::InvalidHandle();
            this->spin_lock.unlock();

            // Enable scheduling, and perform a rescheduling operation.
            SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread);
        }
    }

private:
    KernelCore& kernel;
    Common::SpinLock spin_lock{};
    s32 lock_count{};
    Core::EmuThreadHandle owner_thread{Core::EmuThreadHandle::InvalidHandle()};
};

} // namespace Kernel
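KAbstractSchedulerLock is a recursive lock: nested Lock calls only bump the count, and the expensive work (recomputing top threads, rescheduling cores) runs once, on the outermost Unlock. A sketch of a stand-in scheduler satisfying the static interface the template expects, plus the nesting behavior (illustration only; not how the kernel actually wires this up):

// Hypothetical stand-in for the SchedulerType template parameter.
struct DummyScheduler {
    static void DisableScheduling(Kernel::KernelCore&) {}
    static void EnableScheduling(Kernel::KernelCore&, u64, Core::EmuThreadHandle) {}
    static u64 UpdateHighestPriorityThreads(Kernel::KernelCore&) {
        return 0; // pretend no cores need rescheduling
    }
};

// Kernel::KAbstractSchedulerLock<DummyScheduler> lock{kernel};
// lock.Lock();
// lock.Lock();   // recursive acquire: lock_count 1 -> 2, nothing else happens
// lock.Unlock(); // lock_count 2 -> 1, still held
// lock.Unlock(); // lock_count 1 -> 0: update threads, release spinlock, reschedule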
41
src/core/hle/kernel/k_scoped_lock.h
Executable file
@@ -0,0 +1,41 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include <concepts>
#include <memory>
#include <type_traits>

#include "common/common_types.h"

namespace Kernel {

template <typename T>
concept KLockable = !std::is_reference_v<T> && requires(T& t) {
    { t.Lock() } -> std::same_as<void>;
    { t.Unlock() } -> std::same_as<void>;
};

template <typename T>
requires KLockable<T> class KScopedLock {
public:
    explicit KScopedLock(T* l) : lock_ptr(l) {
        this->lock_ptr->Lock();
    }
    explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) {}

    ~KScopedLock() {
        this->lock_ptr->Unlock();
    }

    KScopedLock(const KScopedLock&) = delete;
    KScopedLock(KScopedLock&&) = delete;

private:
    T* lock_ptr;
};

} // namespace Kernel
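Any type with void Lock()/Unlock() members satisfies KLockable, and the scoped wrapper then gives RAII semantics. A small usage sketch (hypothetical TraceLock type, illustration only):

#include <cstdio>

struct TraceLock {
    void Lock() { std::puts("locked"); }
    void Unlock() { std::puts("unlocked"); }
};

static_assert(Kernel::KLockable<TraceLock>);

void Example() {
    TraceLock l;
    Kernel::KScopedLock lk{l}; // prints "locked"; "unlocked" on scope exit
}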
50
src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
Executable file
@@ -0,0 +1,50 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/common_types.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"

namespace Kernel {

class KScopedSchedulerLockAndSleep {
public:
    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t,
                                          s64 timeout)
        : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
        event_handle = InvalidHandle;

        // Lock the scheduler.
        kernel.GlobalSchedulerContext().scheduler_lock.Lock();
    }

    ~KScopedSchedulerLockAndSleep() {
        // Register the sleep.
        if (this->timeout_tick > 0) {
            kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick);
        }

        // Unlock the scheduler.
        kernel.GlobalSchedulerContext().scheduler_lock.Unlock();
    }

    void CancelSleep() {
        this->timeout_tick = 0;
    }

private:
    KernelCore& kernel;
    Handle& event_handle;
    Thread* thread{};
    s64 timeout_tick{};
};

} // namespace Kernel
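The destructor ordering is the point of this class: the time event is registered (or skipped entirely, when CancelSleep zeroed the timeout) before the scheduler lock is released. A hedged sketch of the intended calling pattern, in comments (names are illustrative):

// Handle event_handle{};
// {
//     KScopedSchedulerLockAndSleep wait(kernel, event_handle, current_thread, timeout_ticks);
//     if (WaitAlreadySatisfied()) {  // hypothetical condition check
//         wait.CancelSleep();        // timeout_tick = 0: destructor registers no time event
//     }
//     // ... otherwise queue the thread on its wait object ...
// } // destructor: schedule the time event (if any), then unlock the scheduler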
661
src/core/hle/kernel/kernel.cpp
Executable file
@@ -0,0 +1,661 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <array>
#include <atomic>
#include <bitset>
#include <functional>
#include <memory>
#include <thread>
#include <unordered_set>
#include <utility>

#include "common/assert.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/thread.h"
#include "core/arm/arm_interface.h"
#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/cpu_manager.h"
#include "core/device_memory.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_layout.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/kernel/memory/slab_heap.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/service_thread.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/synchronization.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
#include "core/memory.h"

MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));

namespace Kernel {

struct KernelCore::Impl {
    explicit Impl(Core::System& system, KernelCore& kernel)
        : synchronization{system}, time_manager{system}, global_handle_table{kernel},
          system{system} {}

    void SetMulticore(bool is_multicore) {
        this->is_multicore = is_multicore;
    }

    void Initialize(KernelCore& kernel) {
        process_list.clear();

        RegisterHostThread();

        global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);

        InitializePhysicalCores();
        InitializeSystemResourceLimit(kernel);
        InitializeMemoryLayout();
        InitializePreemption(kernel);
        InitializeSchedulers();
        InitializeSuspendThreads();
    }

    void InitializeCores() {
        for (auto& core : cores) {
            core.Initialize(current_process->Is64BitProcess());
        }
    }

    void Shutdown() {
        next_object_id = 0;
        next_kernel_process_id = Process::InitialKIPIDMin;
        next_user_process_id = Process::ProcessIDMin;
        next_thread_id = 1;

        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            if (suspend_threads[i]) {
                suspend_threads[i].reset();
            }
        }

        cores.clear();

        current_process = nullptr;

        system_resource_limit = nullptr;

        global_handle_table.Clear();

        preemption_event = nullptr;

        named_ports.clear();

        exclusive_monitor.reset();

        num_host_threads = 0;
        std::fill(register_host_thread_keys.begin(), register_host_thread_keys.end(),
                  std::thread::id{});
        std::fill(register_host_thread_values.begin(), register_host_thread_values.end(), 0);

        // Ensures all service threads gracefully shutdown
        service_threads.clear();
    }

    void InitializePhysicalCores() {
        exclusive_monitor =
            Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
            cores.emplace_back(i, system, *schedulers[i], interrupts);
        }
    }

    void InitializeSchedulers() {
        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            cores[i].Scheduler().Initialize();
        }
    }

    // Creates the default system resource limit
    void InitializeSystemResourceLimit(KernelCore& kernel) {
        system_resource_limit = ResourceLimit::Create(kernel);

        // If setting the default system values fails, then something seriously wrong has occurred.
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x100000000)
                   .IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::Threads, 800).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::Events, 700).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::TransferMemory, 200).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::Sessions, 900).IsSuccess());

        if (!system_resource_limit->Reserve(ResourceType::PhysicalMemory, 0) ||
            !system_resource_limit->Reserve(ResourceType::PhysicalMemory, 0x60000)) {
            UNREACHABLE();
        }
    }

    void InitializePreemption(KernelCore& kernel) {
        preemption_event = Core::Timing::CreateEvent(
            "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
                {
                    KScopedSchedulerLock lock(kernel);
                    global_scheduler_context->PreemptThreads();
                }
                const auto time_interval = std::chrono::nanoseconds{
                    Core::Timing::msToCycles(std::chrono::milliseconds(10))};
                system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
            });

        const auto time_interval =
            std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))};
        system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
    }
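The preemption callback is self-rearming: each invocation rotates the scheduled queues under the scheduler lock and then re-queues itself 10 ms out, so preemption keeps ticking for the lifetime of the emulated system. The same pattern in miniature (hypothetical timer type, not the CoreTiming API):

#include <functional>
#include <utility>

struct MiniTimer {
    std::function<void()> callback; // fires once, 10 ms after being set
    void ScheduleIn10ms(std::function<void()> cb) {
        callback = std::move(cb);
    }
};

void ArmPreemption(MiniTimer& timer) {
    timer.ScheduleIn10ms([&timer] {
        // The real callback preempts threads here, under KScopedSchedulerLock.
        ArmPreemption(timer); // re-arm for the next 10 ms slice
    });
}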

    void InitializeSuspendThreads() {
        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            std::string name = "Suspend Thread Id:" + std::to_string(i);
            std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc();
            void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
            const auto type =
                static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
            auto thread_res =
                Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
                               nullptr, std::move(init_func), init_func_parameter);

            suspend_threads[i] = std::move(thread_res).Unwrap();
        }
    }

    void MakeCurrentProcess(Process* process) {
        current_process = process;
        if (process == nullptr) {
            return;
        }

        const u32 core_id = GetCurrentHostThreadID();
        if (core_id < Core::Hardware::NUM_CPU_CORES) {
            system.Memory().SetCurrentPageTable(*process, core_id);
        }
    }

    void RegisterCoreThread(std::size_t core_id) {
        const std::thread::id this_id = std::this_thread::get_id();
        if (!is_multicore) {
            single_core_thread_id = this_id;
        }
        const auto end =
            register_host_thread_keys.begin() + static_cast<ptrdiff_t>(num_host_threads);
        const auto it = std::find(register_host_thread_keys.begin(), end, this_id);
        ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
        ASSERT(it == end);
        InsertHostThread(static_cast<u32>(core_id));
    }

    void RegisterHostThread() {
        const std::thread::id this_id = std::this_thread::get_id();
        const auto end =
            register_host_thread_keys.begin() + static_cast<ptrdiff_t>(num_host_threads);
        const auto it = std::find(register_host_thread_keys.begin(), end, this_id);
        if (it == end) {
            InsertHostThread(registered_thread_ids++);
        }
    }

    void InsertHostThread(u32 value) {
        const size_t index = num_host_threads++;
        ASSERT_MSG(index < NUM_REGISTRABLE_HOST_THREADS, "Too many host threads");
        register_host_thread_values[index] = value;
        register_host_thread_keys[index] = std::this_thread::get_id();
    }

    [[nodiscard]] u32 GetCurrentHostThreadID() const {
        const std::thread::id this_id = std::this_thread::get_id();
        if (!is_multicore && single_core_thread_id == this_id) {
            return static_cast<u32>(system.GetCpuManager().CurrentCore());
        }
        const auto end =
            register_host_thread_keys.begin() + static_cast<ptrdiff_t>(num_host_threads);
        const auto it = std::find(register_host_thread_keys.begin(), end, this_id);
        if (it == end) {
            return Core::INVALID_HOST_THREAD_ID;
        }
        return register_host_thread_values[static_cast<size_t>(
            std::distance(register_host_thread_keys.begin(), it))];
    }

    Core::EmuThreadHandle GetCurrentEmuThreadID() const {
        Core::EmuThreadHandle result = Core::EmuThreadHandle::InvalidHandle();
        result.host_handle = GetCurrentHostThreadID();
        if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) {
            return result;
        }
        const Kernel::KScheduler& sched = cores[result.host_handle].Scheduler();
        const Kernel::Thread* current = sched.GetCurrentThread();
        if (current != nullptr && !current->IsPhantomMode()) {
            result.guest_handle = current->GetGlobalHandle();
        } else {
            result.guest_handle = InvalidHandle;
        }
        return result;
    }

    void InitializeMemoryLayout() {
        // Initialize memory layout
        constexpr Memory::MemoryLayout layout{Memory::MemoryLayout::GetDefaultLayout()};
        constexpr std::size_t hid_size{0x40000};
        constexpr std::size_t font_size{0x1100000};
        constexpr std::size_t irs_size{0x8000};
        constexpr std::size_t time_size{0x1000};
        constexpr PAddr hid_addr{layout.System().StartAddress()};
        constexpr PAddr font_pa{layout.System().StartAddress() + hid_size};
        constexpr PAddr irs_addr{layout.System().StartAddress() + hid_size + font_size};
        constexpr PAddr time_addr{layout.System().StartAddress() + hid_size + font_size + irs_size};

        // Initialize memory manager
        memory_manager = std::make_unique<Memory::MemoryManager>();
        memory_manager->InitializeManager(Memory::MemoryManager::Pool::Application,
                                          layout.Application().StartAddress(),
                                          layout.Application().EndAddress());
        memory_manager->InitializeManager(Memory::MemoryManager::Pool::Applet,
                                          layout.Applet().StartAddress(),
                                          layout.Applet().EndAddress());
        memory_manager->InitializeManager(Memory::MemoryManager::Pool::System,
                                          layout.System().StartAddress(),
                                          layout.System().EndAddress());

        hid_shared_mem = Kernel::SharedMemory::Create(
            system.Kernel(), system.DeviceMemory(), nullptr,
            {hid_addr, hid_size / Memory::PageSize}, Memory::MemoryPermission::None,
            Memory::MemoryPermission::Read, hid_addr, hid_size, "HID:SharedMemory");
        font_shared_mem = Kernel::SharedMemory::Create(
            system.Kernel(), system.DeviceMemory(), nullptr,
            {font_pa, font_size / Memory::PageSize}, Memory::MemoryPermission::None,
            Memory::MemoryPermission::Read, font_pa, font_size, "Font:SharedMemory");
        irs_shared_mem = Kernel::SharedMemory::Create(
            system.Kernel(), system.DeviceMemory(), nullptr,
            {irs_addr, irs_size / Memory::PageSize}, Memory::MemoryPermission::None,
            Memory::MemoryPermission::Read, irs_addr, irs_size, "IRS:SharedMemory");
        time_shared_mem = Kernel::SharedMemory::Create(
            system.Kernel(), system.DeviceMemory(), nullptr,
            {time_addr, time_size / Memory::PageSize}, Memory::MemoryPermission::None,
            Memory::MemoryPermission::Read, time_addr, time_size, "Time:SharedMemory");

        // Allocate slab heaps
        user_slab_heap_pages = std::make_unique<Memory::SlabHeap<Memory::Page>>();

        // Initialize slab heaps
        constexpr u64 user_slab_heap_size{0x3de000};
        user_slab_heap_pages->Initialize(
            system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
            user_slab_heap_size);
    }
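The four shared-memory regions are packed back to back from the system pool's start address, so each base address is the running sum of the preceding sizes. A compile-time restatement of that arithmetic (values copied from the code above):

#include <cstdint>

constexpr std::uint64_t hid_size = 0x40000;
constexpr std::uint64_t font_size = 0x1100000;
constexpr std::uint64_t irs_size = 0x8000;

// Offsets from layout.System().StartAddress(), matching hid_addr, font_pa,
// irs_addr and time_addr above.
constexpr std::uint64_t hid_off = 0;
constexpr std::uint64_t font_off = hid_off + hid_size;  // 0x40000
constexpr std::uint64_t irs_off = font_off + font_size; // 0x1140000
constexpr std::uint64_t time_off = irs_off + irs_size;  // 0x1148000
static_assert(time_off == hid_size + font_size + irs_size);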
|
||||
|
||||

    std::atomic<u32> next_object_id{0};
    std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
    std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
    std::atomic<u64> next_thread_id{1};

    // Lists all processes that exist in the current session.
    std::vector<std::shared_ptr<Process>> process_list;
    Process* current_process = nullptr;
    std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
    Kernel::Synchronization synchronization;
    Kernel::TimeManager time_manager;

    std::shared_ptr<ResourceLimit> system_resource_limit;

    std::shared_ptr<Core::Timing::EventType> preemption_event;

    // This is the kernel's handle table (the supervisor handle table), which
    // stores all of the kernel's objects in one place.
    HandleTable global_handle_table;

    /// Map of named ports managed by the kernel, which can be retrieved using
    /// the ConnectToPort SVC.
    NamedPortTable named_ports;

    std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
    std::vector<Kernel::PhysicalCore> cores;

    // IDs 0-3 represent core threads; IDs above 3 represent other host threads
    std::atomic<u32> registered_thread_ids{Core::Hardware::NUM_CPU_CORES};

    // The registrable host thread limit is deliberately high to avoid overflowing the table
    static constexpr size_t NUM_REGISTRABLE_HOST_THREADS = 1024;
    std::atomic<size_t> num_host_threads{0};
    std::array<std::atomic<std::thread::id>, NUM_REGISTRABLE_HOST_THREADS>
        register_host_thread_keys{};
    std::array<std::atomic<u32>, NUM_REGISTRABLE_HOST_THREADS> register_host_thread_values{};

    // Kernel memory management
    std::unique_ptr<Memory::MemoryManager> memory_manager;
    std::unique_ptr<Memory::SlabHeap<Memory::Page>> user_slab_heap_pages;

    // Shared memory for services
    std::shared_ptr<Kernel::SharedMemory> hid_shared_mem;
    std::shared_ptr<Kernel::SharedMemory> font_shared_mem;
    std::shared_ptr<Kernel::SharedMemory> irs_shared_mem;
    std::shared_ptr<Kernel::SharedMemory> time_shared_mem;

    // Threads used for services
    std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;

    std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
    std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
    std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};

    bool is_multicore{};
    std::thread::id single_core_thread_id{};

    std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};

    // System context
    Core::System& system;
};

KernelCore::KernelCore(Core::System& system) : impl{std::make_unique<Impl>(system, *this)} {}
KernelCore::~KernelCore() {
    Shutdown();
}

void KernelCore::SetMulticore(bool is_multicore) {
    impl->SetMulticore(is_multicore);
}

void KernelCore::Initialize() {
    impl->Initialize(*this);
}

void KernelCore::InitializeCores() {
    impl->InitializeCores();
}

void KernelCore::Shutdown() {
    impl->Shutdown();
}

std::shared_ptr<ResourceLimit> KernelCore::GetSystemResourceLimit() const {
    return impl->system_resource_limit;
}

std::shared_ptr<Thread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
    return impl->global_handle_table.Get<Thread>(handle);
}

void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) {
    impl->process_list.push_back(std::move(process));
}

void KernelCore::MakeCurrentProcess(Process* process) {
    impl->MakeCurrentProcess(process);
}

Process* KernelCore::CurrentProcess() {
    return impl->current_process;
}

const Process* KernelCore::CurrentProcess() const {
    return impl->current_process;
}

const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const {
    return impl->process_list;
}

Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() {
    return *impl->global_scheduler_context;
}

const Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() const {
    return *impl->global_scheduler_context;
}

Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) {
    return *impl->schedulers[id];
}

const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const {
    return *impl->schedulers[id];
}

Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) {
    return impl->cores[id];
}

const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
    return impl->cores[id];
}

Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
    u32 core_id = impl->GetCurrentHostThreadID();
    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
    return impl->cores[core_id];
}

const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
    u32 core_id = impl->GetCurrentHostThreadID();
    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
    return impl->cores[core_id];
}

Kernel::KScheduler* KernelCore::CurrentScheduler() {
    u32 core_id = impl->GetCurrentHostThreadID();
    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
        // This is expected when called from outside a guest thread
        return {};
    }
    return impl->schedulers[core_id].get();
}
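
// Illustrative caller (hypothetical): CurrentScheduler() returns null when the
// calling host thread is not registered as a core thread, so callers are
// expected to null-check:
//
//     if (auto* scheduler = kernel.CurrentScheduler()) {
//         Thread* thread = scheduler->GetCurrentThread();
//     }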

std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
    return impl->interrupts;
}

const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts()
    const {
    return impl->interrupts;
}

Kernel::Synchronization& KernelCore::Synchronization() {
    return impl->synchronization;
}

const Kernel::Synchronization& KernelCore::Synchronization() const {
    return impl->synchronization;
}

Kernel::TimeManager& KernelCore::TimeManager() {
    return impl->time_manager;
}

const Kernel::TimeManager& KernelCore::TimeManager() const {
    return impl->time_manager;
}

Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() {
    return *impl->exclusive_monitor;
}

const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
    return *impl->exclusive_monitor;
}

void KernelCore::InvalidateAllInstructionCaches() {
    for (auto& physical_core : impl->cores) {
        physical_core.ArmInterface().ClearInstructionCache();
    }
}

void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
    for (auto& physical_core : impl->cores) {
        if (!physical_core.IsInitialized()) {
            continue;
        }
        physical_core.ArmInterface().InvalidateCacheRange(addr, size);
    }
}

void KernelCore::PrepareReschedule(std::size_t id) {
    // TODO: Reimplement this
}

void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) {
    impl->named_ports.emplace(std::move(name), std::move(port));
}

KernelCore::NamedPortTable::iterator KernelCore::FindNamedPort(const std::string& name) {
    return impl->named_ports.find(name);
}

KernelCore::NamedPortTable::const_iterator KernelCore::FindNamedPort(
    const std::string& name) const {
    return impl->named_ports.find(name);
}

bool KernelCore::IsValidNamedPort(NamedPortTable::const_iterator port) const {
    return port != impl->named_ports.cend();
}
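
// Illustrative usage (hypothetical lookup of the real "sm:" port):
//
//     auto port = kernel.FindNamedPort("sm:");
//     if (kernel.IsValidNamedPort(port)) {
//         std::shared_ptr<ClientPort> client_port = port->second;
//     }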

u32 KernelCore::CreateNewObjectID() {
    return impl->next_object_id++;
}

u64 KernelCore::CreateNewThreadID() {
    return impl->next_thread_id++;
}

u64 KernelCore::CreateNewKernelProcessID() {
    return impl->next_kernel_process_id++;
}

u64 KernelCore::CreateNewUserProcessID() {
    return impl->next_user_process_id++;
}

Kernel::HandleTable& KernelCore::GlobalHandleTable() {
    return impl->global_handle_table;
}

const Kernel::HandleTable& KernelCore::GlobalHandleTable() const {
    return impl->global_handle_table;
}

void KernelCore::RegisterCoreThread(std::size_t core_id) {
    impl->RegisterCoreThread(core_id);
}

void KernelCore::RegisterHostThread() {
    impl->RegisterHostThread();
}

u32 KernelCore::GetCurrentHostThreadID() const {
    return impl->GetCurrentHostThreadID();
}

Core::EmuThreadHandle KernelCore::GetCurrentEmuThreadID() const {
    return impl->GetCurrentEmuThreadID();
}

Memory::MemoryManager& KernelCore::MemoryManager() {
    return *impl->memory_manager;
}

const Memory::MemoryManager& KernelCore::MemoryManager() const {
    return *impl->memory_manager;
}

Memory::SlabHeap<Memory::Page>& KernelCore::GetUserSlabHeapPages() {
    return *impl->user_slab_heap_pages;
}

const Memory::SlabHeap<Memory::Page>& KernelCore::GetUserSlabHeapPages() const {
    return *impl->user_slab_heap_pages;
}

Kernel::SharedMemory& KernelCore::GetHidSharedMem() {
    return *impl->hid_shared_mem;
}

const Kernel::SharedMemory& KernelCore::GetHidSharedMem() const {
    return *impl->hid_shared_mem;
}

Kernel::SharedMemory& KernelCore::GetFontSharedMem() {
    return *impl->font_shared_mem;
}

const Kernel::SharedMemory& KernelCore::GetFontSharedMem() const {
    return *impl->font_shared_mem;
}

Kernel::SharedMemory& KernelCore::GetIrsSharedMem() {
    return *impl->irs_shared_mem;
}

const Kernel::SharedMemory& KernelCore::GetIrsSharedMem() const {
    return *impl->irs_shared_mem;
}

Kernel::SharedMemory& KernelCore::GetTimeSharedMem() {
    return *impl->time_shared_mem;
}

const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
    return *impl->time_shared_mem;
}

void KernelCore::Suspend(bool in_suspension) {
    const bool should_suspend = exception_exited || in_suspension;
    {
        KScopedSchedulerLock lock(*this);
        ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            impl->suspend_threads[i]->SetStatus(status);
        }
    }
}

bool KernelCore::IsMulticore() const {
    return impl->is_multicore;
}

void KernelCore::ExceptionalExit() {
    exception_exited = true;
    Suspend(true);
}

void KernelCore::EnterSVCProfile() {
    std::size_t core = impl->GetCurrentHostThreadID();
    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
}

void KernelCore::ExitSVCProfile() {
    std::size_t core = impl->GetCurrentHostThreadID();
    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
}

std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
    auto service_thread = std::make_shared<Kernel::ServiceThread>(*this, 1, name);
    impl->service_threads.emplace(service_thread);
    return service_thread;
}

void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
    if (auto strong_ptr = service_thread.lock()) {
        impl->service_threads.erase(strong_ptr);
    }
}
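
// Illustrative pairing (hypothetical owner such as a ServerSession):
//
//     auto thread = kernel.CreateServiceThread("IFileSystem");
//     // ... dispatch service requests on `thread` ...
//     kernel.ReleaseServiceThread(thread); // when the session is destroyed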

} // namespace Kernel
275
src/core/hle/kernel/kernel.h
Executable file
@@ -0,0 +1,275 @@
// Copyright 2014 Citra Emulator Project / PPSSPP Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "core/arm/cpu_interrupt_handler.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/kernel/object.h"

namespace Core {
class CPUInterruptHandler;
class ExclusiveMonitor;
class System;
} // namespace Core

namespace Core::Timing {
class CoreTiming;
struct EventType;
} // namespace Core::Timing

namespace Kernel {

namespace Memory {
class MemoryManager;
template <typename T>
class SlabHeap;
} // namespace Memory

class AddressArbiter;
class ClientPort;
class GlobalSchedulerContext;
class HandleTable;
class PhysicalCore;
class Process;
class ResourceLimit;
class KScheduler;
class SharedMemory;
class ServiceThread;
class Synchronization;
class Thread;
class TimeManager;

/// Represents a single instance of the kernel.
class KernelCore {
private:
    using NamedPortTable = std::unordered_map<std::string, std::shared_ptr<ClientPort>>;

public:
    /// Constructs an instance of the kernel using the given System
    /// instance as a context for any necessary system-related state,
    /// such as threads, CPU core state, etc.
    ///
    /// @post After execution of the constructor, the provided System
    ///       object *must* outlive the kernel instance itself.
    ///
    explicit KernelCore(Core::System& system);
    ~KernelCore();

    KernelCore(const KernelCore&) = delete;
    KernelCore& operator=(const KernelCore&) = delete;

    KernelCore(KernelCore&&) = delete;
    KernelCore& operator=(KernelCore&&) = delete;

    /// Sets whether emulation is multicore or single-core; must be set before Initialize.
    void SetMulticore(bool is_multicore);

    /// Resets the kernel to a clean slate for use.
    void Initialize();

    /// Initializes the CPU cores.
    void InitializeCores();

    /// Clears all resources in use by the kernel instance.
    void Shutdown();

    /// Retrieves a shared pointer to the system resource limit instance.
    std::shared_ptr<ResourceLimit> GetSystemResourceLimit() const;

    /// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table.
    std::shared_ptr<Thread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;

    /// Adds the given shared pointer to an internal list of active processes.
    void AppendNewProcess(std::shared_ptr<Process> process);

    /// Makes the given process the new current process.
    void MakeCurrentProcess(Process* process);

    /// Retrieves a pointer to the current process.
    Process* CurrentProcess();

    /// Retrieves a const pointer to the current process.
    const Process* CurrentProcess() const;

    /// Retrieves the list of processes.
    const std::vector<std::shared_ptr<Process>>& GetProcessList() const;

    /// Gets the sole instance of the global scheduler.
    Kernel::GlobalSchedulerContext& GlobalSchedulerContext();

    /// Gets the sole instance of the global scheduler.
    const Kernel::GlobalSchedulerContext& GlobalSchedulerContext() const;

    /// Gets the sole instance of the scheduler associated with CPU core 'id'.
    Kernel::KScheduler& Scheduler(std::size_t id);

    /// Gets the sole instance of the scheduler associated with CPU core 'id'.
    const Kernel::KScheduler& Scheduler(std::size_t id) const;

    /// Gets an instance of the respective physical CPU core.
    Kernel::PhysicalCore& PhysicalCore(std::size_t id);

    /// Gets an instance of the respective physical CPU core.
    const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;

    /// Gets the sole instance of the scheduler at the current running core.
    Kernel::KScheduler* CurrentScheduler();

    /// Gets an instance of the current physical CPU core.
    Kernel::PhysicalCore& CurrentPhysicalCore();

    /// Gets an instance of the current physical CPU core.
    const Kernel::PhysicalCore& CurrentPhysicalCore() const;

    /// Gets an instance of the Synchronization interface.
    Kernel::Synchronization& Synchronization();

    /// Gets an instance of the Synchronization interface.
    const Kernel::Synchronization& Synchronization() const;

    /// Gets an instance of the TimeManager interface.
    Kernel::TimeManager& TimeManager();

    /// Gets an instance of the TimeManager interface.
    const Kernel::TimeManager& TimeManager() const;

    /// Stops execution of core 'id', in order to reschedule a new thread.
    void PrepareReschedule(std::size_t id);

    Core::ExclusiveMonitor& GetExclusiveMonitor();

    const Core::ExclusiveMonitor& GetExclusiveMonitor() const;

    std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts();

    const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const;

    void InvalidateAllInstructionCaches();

    void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);

    /// Adds a port to the named port table.
    void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port);

    /// Finds a port within the named port table with the given name.
    NamedPortTable::iterator FindNamedPort(const std::string& name);

    /// Finds a port within the named port table with the given name.
    NamedPortTable::const_iterator FindNamedPort(const std::string& name) const;

    /// Determines whether or not the given port is a valid named port.
    bool IsValidNamedPort(NamedPortTable::const_iterator port) const;

    /// Gets the current host_thread/guest_thread handle.
    Core::EmuThreadHandle GetCurrentEmuThreadID() const;

    /// Gets the current host_thread handle.
    u32 GetCurrentHostThreadID() const;

    /// Registers the current thread as a CPU core thread.
    void RegisterCoreThread(std::size_t core_id);

    /// Registers the current thread as a non-CPU core thread.
    void RegisterHostThread();

    /// Gets the virtual memory manager for the kernel.
    Memory::MemoryManager& MemoryManager();

    /// Gets the virtual memory manager for the kernel.
    const Memory::MemoryManager& MemoryManager() const;

    /// Gets the slab heap allocated for user space pages.
    Memory::SlabHeap<Memory::Page>& GetUserSlabHeapPages();

    /// Gets the slab heap allocated for user space pages.
    const Memory::SlabHeap<Memory::Page>& GetUserSlabHeapPages() const;

    /// Gets the shared memory object for HID services.
    Kernel::SharedMemory& GetHidSharedMem();

    /// Gets the shared memory object for HID services.
    const Kernel::SharedMemory& GetHidSharedMem() const;

    /// Gets the shared memory object for font services.
    Kernel::SharedMemory& GetFontSharedMem();

    /// Gets the shared memory object for font services.
    const Kernel::SharedMemory& GetFontSharedMem() const;

    /// Gets the shared memory object for IRS services.
    Kernel::SharedMemory& GetIrsSharedMem();

    /// Gets the shared memory object for IRS services.
    const Kernel::SharedMemory& GetIrsSharedMem() const;

    /// Gets the shared memory object for Time services.
    Kernel::SharedMemory& GetTimeSharedMem();

    /// Gets the shared memory object for Time services.
    const Kernel::SharedMemory& GetTimeSharedMem() const;

    /// Suspends or unsuspends the OS.
    void Suspend(bool in_suspension);

    /// Performs an exceptional exit of the OS.
    void ExceptionalExit();

    bool IsMulticore() const;

    void EnterSVCProfile();

    void ExitSVCProfile();

    /**
     * Creates an HLE service thread, which is used to execute service routines asynchronously.
     * While these are allocated per ServerSession, these need to be owned and managed outside of
     * ServerSession to avoid a circular dependency.
     * @param name String name for the ServerSession creating this thread, used for debug purposes.
     * @returns A weak pointer to the newly created service thread.
     */
    std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name);

    /**
     * Releases an HLE service thread, instructing KernelCore to free it. This should be called
     * when the ServerSession associated with the thread is destroyed.
     * @param service_thread Service thread to release.
     */
    void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread);

private:
    friend class Object;
    friend class Process;
    friend class Thread;

    /// Creates a new object ID, incrementing the internal object ID counter.
    u32 CreateNewObjectID();

    /// Creates a new process ID, incrementing the internal process ID counter.
    u64 CreateNewKernelProcessID();

    /// Creates a new process ID, incrementing the internal process ID counter.
    u64 CreateNewUserProcessID();

    /// Creates a new thread ID, incrementing the internal thread ID counter.
    u64 CreateNewThreadID();

    /// Provides a reference to the global handle table.
    Kernel::HandleTable& GlobalHandleTable();

    /// Provides a const reference to the global handle table.
    const Kernel::HandleTable& GlobalHandleTable() const;

    struct Impl;
    std::unique_ptr<Impl> impl;

    bool exception_exited{};
};

} // namespace Kernel
117
src/core/hle/kernel/memory/address_space_info.cpp
Executable file
@@ -0,0 +1,117 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#include <array>
#include <limits>

#include "common/assert.h"
#include "core/hle/kernel/memory/address_space_info.h"

namespace Kernel::Memory {

namespace {

enum : u64 {
    Size_1_MB = 0x100000,
    Size_2_MB = 2 * Size_1_MB,
    Size_128_MB = 128 * Size_1_MB,
    Size_1_GB = 0x40000000,
    Size_2_GB = 2 * Size_1_GB,
    Size_4_GB = 4 * Size_1_GB,
    Size_6_GB = 6 * Size_1_GB,
    Size_64_GB = 64 * Size_1_GB,
    Size_512_GB = 512 * Size_1_GB,
    Invalid = std::numeric_limits<u64>::max(),
};

// clang-format off
constexpr std::array<AddressSpaceInfo, 13> AddressSpaceInfos{{
   { .bit_width = 32, .address = Size_2_MB  , .size = Size_1_GB   - Size_2_MB  , .type = AddressSpaceInfo::Type::Is32Bit,    },
   { .bit_width = 32, .address = Size_1_GB  , .size = Size_4_GB   - Size_1_GB  , .type = AddressSpaceInfo::Type::Small64Bit, },
   { .bit_width = 32, .address = Invalid    , .size = Size_1_GB                , .type = AddressSpaceInfo::Type::Heap,       },
   { .bit_width = 32, .address = Invalid    , .size = Size_1_GB                , .type = AddressSpaceInfo::Type::Alias,      },
   { .bit_width = 36, .address = Size_128_MB, .size = Size_2_GB   - Size_128_MB, .type = AddressSpaceInfo::Type::Is32Bit,    },
   { .bit_width = 36, .address = Size_2_GB  , .size = Size_64_GB  - Size_2_GB  , .type = AddressSpaceInfo::Type::Small64Bit, },
   { .bit_width = 36, .address = Invalid    , .size = Size_6_GB                , .type = AddressSpaceInfo::Type::Heap,       },
   { .bit_width = 36, .address = Invalid    , .size = Size_6_GB                , .type = AddressSpaceInfo::Type::Alias,      },
   { .bit_width = 39, .address = Size_128_MB, .size = Size_512_GB - Size_128_MB, .type = AddressSpaceInfo::Type::Large64Bit, },
   { .bit_width = 39, .address = Invalid    , .size = Size_64_GB               , .type = AddressSpaceInfo::Type::Is32Bit,    },
   { .bit_width = 39, .address = Invalid    , .size = Size_6_GB                , .type = AddressSpaceInfo::Type::Heap,       },
   { .bit_width = 39, .address = Invalid    , .size = Size_64_GB               , .type = AddressSpaceInfo::Type::Alias,      },
   { .bit_width = 39, .address = Invalid    , .size = Size_2_GB                , .type = AddressSpaceInfo::Type::Stack,      },
}};
// clang-format on

constexpr bool IsAllowedIndexForAddress(std::size_t index) {
    return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Invalid;
}

using IndexArray = std::array<std::size_t, static_cast<std::size_t>(AddressSpaceInfo::Type::Count)>;

constexpr IndexArray AddressSpaceIndices32Bit{
    0, 1, 0, 2, 0, 3,
};

constexpr IndexArray AddressSpaceIndices36Bit{
    4, 5, 4, 6, 4, 7,
};

constexpr IndexArray AddressSpaceIndices39Bit{
    9, 8, 8, 10, 12, 11,
};

constexpr bool IsAllowed32BitType(AddressSpaceInfo::Type type) {
    return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
           type != AddressSpaceInfo::Type::Stack;
}

constexpr bool IsAllowed36BitType(AddressSpaceInfo::Type type) {
    return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
           type != AddressSpaceInfo::Type::Stack;
}

constexpr bool IsAllowed39BitType(AddressSpaceInfo::Type type) {
    return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Small64Bit;
}

} // namespace

u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
    const std::size_t index{static_cast<std::size_t>(type)};
    switch (width) {
    case 32:
        ASSERT(IsAllowed32BitType(type));
        ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices32Bit[index]));
        return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].address;
    case 36:
        ASSERT(IsAllowed36BitType(type));
        ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices36Bit[index]));
        return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].address;
    case 39:
        ASSERT(IsAllowed39BitType(type));
        ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
        return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
    }
    UNREACHABLE();
}

std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
    const std::size_t index{static_cast<std::size_t>(type)};
    switch (width) {
    case 32:
        ASSERT(IsAllowed32BitType(type));
        return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].size;
    case 36:
        ASSERT(IsAllowed36BitType(type));
        return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].size;
    case 39:
        ASSERT(IsAllowed39BitType(type));
        return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
    }
    UNREACHABLE();
}
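
// Illustrative lookup (values read off the table above): for a 39-bit address
// space, Type::Large64Bit maps to index 8, so
//
//     AddressSpaceInfo::GetAddressSpaceStart(39, AddressSpaceInfo::Type::Large64Bit)
//
// returns Size_128_MB, and the matching GetAddressSpaceSize call returns
// Size_512_GB - Size_128_MB.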

} // namespace Kernel::Memory
34
src/core/hle/kernel/memory/address_space_info.h
Executable file
@@ -0,0 +1,34 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/common_types.h"

namespace Kernel::Memory {

struct AddressSpaceInfo final {
    enum class Type : u32 {
        Is32Bit = 0,
        Small64Bit = 1,
        Large64Bit = 2,
        Heap = 3,
        Stack = 4,
        Alias = 5,
        Count,
    };

    static u64 GetAddressSpaceStart(std::size_t width, Type type);
    static std::size_t GetAddressSpaceSize(std::size_t width, Type type);

    const std::size_t bit_width{};
    const std::size_t address{};
    const std::size_t size{};
    const Type type{};
};

} // namespace Kernel::Memory
335
src/core/hle/kernel/memory/memory_block.h
Executable file
@@ -0,0 +1,335 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include <type_traits>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/kernel/svc_types.h"

namespace Kernel::Memory {

enum class MemoryState : u32 {
    None = 0,
    Mask = 0xFF,
    All = ~None,

    FlagCanReprotect = (1 << 8),
    FlagCanDebug = (1 << 9),
    FlagCanUseIpc = (1 << 10),
    FlagCanUseNonDeviceIpc = (1 << 11),
    FlagCanUseNonSecureIpc = (1 << 12),
    FlagMapped = (1 << 13),
    FlagCode = (1 << 14),
    FlagCanAlias = (1 << 15),
    FlagCanCodeAlias = (1 << 16),
    FlagCanTransfer = (1 << 17),
    FlagCanQueryPhysical = (1 << 18),
    FlagCanDeviceMap = (1 << 19),
    FlagCanAlignedDeviceMap = (1 << 20),
    FlagCanIpcUserBuffer = (1 << 21),
    FlagReferenceCounted = (1 << 22),
    FlagCanMapProcess = (1 << 23),
    FlagCanChangeAttribute = (1 << 24),
    FlagCanCodeMemory = (1 << 25),

    FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
                FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
                FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer |
                FlagReferenceCounted | FlagCanChangeAttribute,

    FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
                FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap |
                FlagCanAlignedDeviceMap | FlagReferenceCounted,

    FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap,

    Free = static_cast<u32>(Svc::MemoryState::Free),
    Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped,
    Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
    Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
    CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
               FlagCanCodeMemory,
    Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted,
    Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,

    AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
                FlagCanCodeAlias,
    AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData |
                    FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory,

    Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap |
          FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
            FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    ThreadLocal =
        static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,

    Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
                 FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
                 FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc |
                       FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
                 FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible),

    NonSecureIpc = static_cast<u32>(Svc::MemoryState::NonSecureIpc) | FlagsMisc |
                   FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    NonDeviceIpc =
        static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc,

    Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,

    GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
                    FlagReferenceCounted | FlagCanDebug,
    CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted,
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryState);

static_assert(static_cast<u32>(MemoryState::Free) == 0x00000000);
static_assert(static_cast<u32>(MemoryState::Io) == 0x00002001);
static_assert(static_cast<u32>(MemoryState::Static) == 0x00042002);
static_assert(static_cast<u32>(MemoryState::Code) == 0x00DC7E03);
static_assert(static_cast<u32>(MemoryState::CodeData) == 0x03FEBD04);
static_assert(static_cast<u32>(MemoryState::Normal) == 0x037EBD05);
static_assert(static_cast<u32>(MemoryState::Shared) == 0x00402006);
static_assert(static_cast<u32>(MemoryState::AliasCode) == 0x00DD7E08);
static_assert(static_cast<u32>(MemoryState::AliasCodeData) == 0x03FFBD09);
static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A);
static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B);
static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C);
static_assert(static_cast<u32>(MemoryState::Transfered) == 0x015C3C0D);
static_assert(static_cast<u32>(MemoryState::SharedTransfered) == 0x005C380E);
static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F);
static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811);
static_assert(static_cast<u32>(MemoryState::NonDeviceIpc) == 0x004C2812);
static_assert(static_cast<u32>(MemoryState::Kernel) == 0x00002013);
static_assert(static_cast<u32>(MemoryState::GeneratedCode) == 0x00402214);
static_assert(static_cast<u32>(MemoryState::CodeOut) == 0x00402015);
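
// Worked example (derived from the definitions above): each MemoryState value
// packs the SVC-visible state in its low byte and capability flags above it.
// For MemoryState::Normal:
//
//     0x00000005   Svc::MemoryState::Normal
//   | 0x017EBD00   FlagsData
//   | 0x02000000   FlagCanCodeMemory
//   = 0x037EBD05   (matching the static_assert above)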

enum class MemoryPermission : u8 {
    None = 0,
    Mask = static_cast<u8>(~None),

    Read = 1 << 0,
    Write = 1 << 1,
    Execute = 1 << 2,

    ReadAndWrite = Read | Write,
    ReadAndExecute = Read | Execute,

    UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
                               Svc::MemoryPermission::Execute),
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission);

enum class MemoryAttribute : u8 {
    None = 0x00,
    Mask = 0x7F,
    All = Mask,
    DontCareMask = 0x80,

    Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
    IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
    DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared),
    Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),

    IpcAndDeviceMapped = IpcLocked | DeviceShared,
    LockedAndIpcLocked = Locked | IpcLocked,
    DeviceSharedAndUncached = DeviceShared | Uncached
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);

static_assert((static_cast<u8>(MemoryAttribute::Mask) &
               static_cast<u8>(MemoryAttribute::DontCareMask)) == 0);

struct MemoryInfo {
    VAddr addr{};
    std::size_t size{};
    MemoryState state{};
    MemoryPermission perm{};
    MemoryAttribute attribute{};
    MemoryPermission original_perm{};
    u16 ipc_lock_count{};
    u16 device_use_count{};

    constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
        return {
            addr,
            size,
            static_cast<Svc::MemoryState>(state & MemoryState::Mask),
            static_cast<Svc::MemoryAttribute>(attribute & MemoryAttribute::Mask),
            static_cast<Svc::MemoryPermission>(perm & MemoryPermission::UserMask),
            ipc_lock_count,
            device_use_count,
        };
    }

    constexpr VAddr GetAddress() const {
        return addr;
    }
    constexpr std::size_t GetSize() const {
        return size;
    }
    constexpr std::size_t GetNumPages() const {
        return GetSize() / PageSize;
    }
    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();
    }
    constexpr VAddr GetLastAddress() const {
        return GetEndAddress() - 1;
    }
};

class MemoryBlock final {
    friend class MemoryBlockManager;

private:
    VAddr addr{};
    std::size_t num_pages{};
    MemoryState state{MemoryState::None};
    u16 ipc_lock_count{};
    u16 device_use_count{};
    MemoryPermission perm{MemoryPermission::None};
    MemoryPermission original_perm{MemoryPermission::None};
    MemoryAttribute attribute{MemoryAttribute::None};

public:
    static constexpr int Compare(const MemoryBlock& lhs, const MemoryBlock& rhs) {
        if (lhs.GetAddress() < rhs.GetAddress()) {
            return -1;
        } else if (lhs.GetAddress() <= rhs.GetLastAddress()) {
            return 0;
        } else {
            return 1;
        }
    }

public:
    constexpr MemoryBlock() = default;
    constexpr MemoryBlock(VAddr addr_, std::size_t num_pages_, MemoryState state_,
                          MemoryPermission perm_, MemoryAttribute attribute_)
        : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}

    constexpr VAddr GetAddress() const {
        return addr;
    }

    constexpr std::size_t GetNumPages() const {
        return num_pages;
    }

    constexpr std::size_t GetSize() const {
        return GetNumPages() * PageSize;
    }

    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();
    }

    constexpr VAddr GetLastAddress() const {
        return GetEndAddress() - 1;
    }

    constexpr MemoryInfo GetMemoryInfo() const {
        return {
            GetAddress(), GetSize(),     state,          perm,
            attribute,    original_perm, ipc_lock_count, device_use_count,
        };
    }

    void ShareToDevice(MemoryPermission /*new_perm*/) {
        ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared ||
               device_use_count == 0);
        attribute |= MemoryAttribute::DeviceShared;
        const u16 new_use_count{++device_use_count};
        ASSERT(new_use_count > 0);
    }

    void UnshareToDevice(MemoryPermission /*new_perm*/) {
        ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared);
        const u16 prev_use_count{device_use_count--};
        ASSERT(prev_use_count > 0);
        if (prev_use_count == 1) {
            attribute &= ~MemoryAttribute::DeviceShared;
        }
    }

private:
    constexpr bool HasProperties(MemoryState s, MemoryPermission p, MemoryAttribute a) const {
        constexpr MemoryAttribute AttributeIgnoreMask{MemoryAttribute::DontCareMask |
                                                      MemoryAttribute::IpcLocked |
                                                      MemoryAttribute::DeviceShared};
        return state == s && perm == p &&
               (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
    }

    constexpr bool HasSameProperties(const MemoryBlock& rhs) const {
        return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
               attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
               device_use_count == rhs.device_use_count;
    }

    constexpr bool Contains(VAddr start) const {
        return GetAddress() <= start && start <= GetEndAddress();
    }

    constexpr void Add(std::size_t count) {
        ASSERT(count > 0);
        ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1);

        num_pages += count;
    }

    constexpr void Update(MemoryState new_state, MemoryPermission new_perm,
                          MemoryAttribute new_attribute) {
        ASSERT(original_perm == MemoryPermission::None);
        ASSERT((attribute & MemoryAttribute::IpcLocked) == MemoryAttribute::None);

        state = new_state;
        perm = new_perm;

        attribute = static_cast<MemoryAttribute>(
            new_attribute |
            (attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared)));
    }

    constexpr MemoryBlock Split(VAddr split_addr) {
        ASSERT(GetAddress() < split_addr);
        ASSERT(Contains(split_addr));
        ASSERT(Common::IsAligned(split_addr, PageSize));

        MemoryBlock block;
        block.addr = addr;
        block.num_pages = (split_addr - GetAddress()) / PageSize;
        block.state = state;
        block.ipc_lock_count = ipc_lock_count;
        block.device_use_count = device_use_count;
        block.perm = perm;
        block.original_perm = original_perm;
        block.attribute = attribute;

        addr = split_addr;
        num_pages -= block.num_pages;

        return block;
    }
};
static_assert(std::is_trivially_destructible<MemoryBlock>::value);
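
// Illustrative behavior of MemoryBlock::Split (hypothetical values, assuming a
// 4 KiB PageSize): splitting [0x10000000, +16 pages) at 0x10004000 returns the
// front portion and leaves the tail in the original block:
//
//     MemoryBlock front = block.Split(0x10004000);
//     // front: addr == 0x10000000, num_pages == 4
//     // block: addr == 0x10004000, num_pages == 12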

} // namespace Kernel::Memory
223
src/core/hle/kernel/memory/memory_block_manager.cpp
Executable file
@@ -0,0 +1,223 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/memory/memory_block_manager.h"
#include "core/hle/kernel/memory/memory_types.h"

namespace Kernel::Memory {

MemoryBlockManager::MemoryBlockManager(VAddr start_addr, VAddr end_addr)
    : start_addr{start_addr}, end_addr{end_addr} {
    const u64 num_pages{(end_addr - start_addr) / PageSize};
    memory_block_tree.emplace_back(start_addr, num_pages, MemoryState::Free, MemoryPermission::None,
                                   MemoryAttribute::None);
}

MemoryBlockManager::iterator MemoryBlockManager::FindIterator(VAddr addr) {
    auto node{memory_block_tree.begin()};
    while (node != end()) {
        const VAddr end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
        if (node->GetAddress() <= addr && end_addr - 1 >= addr) {
            return node;
        }
        node = std::next(node);
    }
    return end();
}

VAddr MemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
                                       std::size_t num_pages, std::size_t align, std::size_t offset,
                                       std::size_t guard_pages) {
    if (num_pages == 0) {
        return {};
    }

    const VAddr region_end{region_start + region_num_pages * PageSize};
    const VAddr region_last{region_end - 1};
    for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
        const auto info{it->GetMemoryInfo()};
        if (region_last < info.GetAddress()) {
            break;
        }

        if (info.state != MemoryState::Free) {
            continue;
        }

        VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
        area += guard_pages * PageSize;

        const VAddr offset_area{Common::AlignDown(area, align) + offset};
        area = (area <= offset_area) ? offset_area : offset_area + align;

        const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
        const VAddr area_last{area_end - 1};

        if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
            area_last <= info.GetLastAddress()) {
            return area;
        }
    }

    return {};
}
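
// Worked example of the alignment step in FindFreeArea above (hypothetical
// values, 4 KiB PageSize): with area = 0x10003000, align = 0x10000, and
// offset = 0x1000:
//
//     offset_area = AlignDown(0x10003000, 0x10000) + 0x1000 = 0x10001000
//     area        = (area <= offset_area) ? offset_area : offset_area + align
//                 = 0x10011000   // offset_area fell below area, so bump by align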

void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
                                MemoryPermission prev_perm, MemoryAttribute prev_attribute,
                                MemoryState state, MemoryPermission perm,
                                MemoryAttribute attribute) {
    const VAddr end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};

    prev_attribute |= MemoryAttribute::IpcAndDeviceMapped;

    while (node != memory_block_tree.end()) {
        MemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

        if (addr < cur_end_addr && cur_addr < end_addr) {
            if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
                node = next_node;
                continue;
            }

            iterator new_node{node};
            if (addr > cur_addr) {
                memory_block_tree.insert(node, block->Split(addr));
            }

            if (end_addr < cur_end_addr) {
                new_node = memory_block_tree.insert(node, block->Split(end_addr));
            }

            new_node->Update(state, perm, attribute);

            MergeAdjacent(new_node, next_node);
        }

        if (cur_end_addr - 1 >= end_addr - 1) {
            break;
        }

        node = next_node;
    }
}

void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState state,
                                MemoryPermission perm, MemoryAttribute attribute) {
    const VAddr end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};

    while (node != memory_block_tree.end()) {
        MemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

        if (addr < cur_end_addr && cur_addr < end_addr) {
            iterator new_node{node};

            if (addr > cur_addr) {
                memory_block_tree.insert(node, block->Split(addr));
            }

            if (end_addr < cur_end_addr) {
                new_node = memory_block_tree.insert(node, block->Split(end_addr));
            }

            new_node->Update(state, perm, attribute);

            MergeAdjacent(new_node, next_node);
        }

        if (cur_end_addr - 1 >= end_addr - 1) {
            break;
        }

        node = next_node;
    }
}

void MemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
                                    MemoryPermission perm) {
    const VAddr end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};

    while (node != memory_block_tree.end()) {
        MemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

        if (addr < cur_end_addr && cur_addr < end_addr) {
            iterator new_node{node};

            if (addr > cur_addr) {
                memory_block_tree.insert(node, block->Split(addr));
            }

            if (end_addr < cur_end_addr) {
                new_node = memory_block_tree.insert(node, block->Split(end_addr));
            }

            lock_func(new_node, perm);

            MergeAdjacent(new_node, next_node);
        }

        if (cur_end_addr - 1 >= end_addr - 1) {
            break;
        }

        node = next_node;
    }
}

void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
    const_iterator it{FindIterator(start)};
    MemoryInfo info{};
    do {
        info = it->GetMemoryInfo();
        func(info);
        it = std::next(it);
    } while (info.addr + info.size - 1 < end - 1 && it != cend());
}

void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
    MemoryBlock* block{&(*it)};

    auto EraseIt = [&](const iterator it_to_erase) {
        if (next_it == it_to_erase) {
            next_it = std::next(next_it);
        }
        memory_block_tree.erase(it_to_erase);
    };

    if (it != memory_block_tree.begin()) {
        MemoryBlock* prev{&(*std::prev(it))};

        if (block->HasSameProperties(*prev)) {
            const iterator prev_it{std::prev(it)};

            prev->Add(block->GetNumPages());
            EraseIt(it);

            it = prev_it;
            block = prev;
        }
    }

    if (it != cend()) {
        const MemoryBlock* const next{&(*std::next(it))};

        if (block->HasSameProperties(*next)) {
            block->Add(next->GetNumPages());
            EraseIt(std::next(it));
        }
    }
}

} // namespace Kernel::Memory
66
src/core/hle/kernel/memory/memory_block_manager.h
Executable file
@@ -0,0 +1,66 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <functional>
#include <list>

#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_block.h"

namespace Kernel::Memory {

class MemoryBlockManager final {
public:
    using MemoryBlockTree = std::list<MemoryBlock>;
    using iterator = MemoryBlockTree::iterator;
    using const_iterator = MemoryBlockTree::const_iterator;

public:
    MemoryBlockManager(VAddr start_addr, VAddr end_addr);

    iterator end() {
        return memory_block_tree.end();
    }
    const_iterator end() const {
        return memory_block_tree.end();
    }
    const_iterator cend() const {
        return memory_block_tree.cend();
    }

    iterator FindIterator(VAddr addr);

    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
                       std::size_t align, std::size_t offset, std::size_t guard_pages);

    void Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
                MemoryPermission prev_perm, MemoryAttribute prev_attribute, MemoryState state,
                MemoryPermission perm, MemoryAttribute attribute);

    void Update(VAddr addr, std::size_t num_pages, MemoryState state,
                MemoryPermission perm = MemoryPermission::None,
                MemoryAttribute attribute = MemoryAttribute::None);

    using LockFunc = std::function<void(iterator, MemoryPermission)>;
    void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, MemoryPermission perm);

    using IterateFunc = std::function<void(const MemoryInfo&)>;
    void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);

    MemoryBlock& FindBlock(VAddr addr) {
        return *FindIterator(addr);
    }

private:
    void MergeAdjacent(iterator it, iterator& next_it);

    [[maybe_unused]] const VAddr start_addr;
    [[maybe_unused]] const VAddr end_addr;

    MemoryBlockTree memory_block_tree;
};

} // namespace Kernel::Memory
71
src/core/hle/kernel/memory/memory_layout.h
Executable file
@@ -0,0 +1,71 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"
#include "core/device_memory.h" // for Core::DramMemoryMap

namespace Kernel::Memory {

class MemoryRegion final {
    friend class MemoryLayout;

public:
    constexpr PAddr StartAddress() const {
        return start_address;
    }

    constexpr PAddr EndAddress() const {
        return end_address;
    }

private:
    constexpr MemoryRegion() = default;
    constexpr MemoryRegion(PAddr start_address, PAddr end_address)
        : start_address{start_address}, end_address{end_address} {}

    const PAddr start_address{};
    const PAddr end_address{};
};

class MemoryLayout final {
public:
    constexpr const MemoryRegion& Application() const {
        return application;
    }

    constexpr const MemoryRegion& Applet() const {
        return applet;
    }

    constexpr const MemoryRegion& System() const {
        return system;
    }

    static constexpr MemoryLayout GetDefaultLayout() {
        constexpr std::size_t application_size{0xcd500000};
        constexpr std::size_t applet_size{0x1fb00000};
        constexpr PAddr application_start_address{Core::DramMemoryMap::End - application_size};
        constexpr PAddr application_end_address{Core::DramMemoryMap::End};
        constexpr PAddr applet_start_address{application_start_address - applet_size};
        constexpr PAddr applet_end_address{applet_start_address + applet_size};
        constexpr PAddr system_start_address{Core::DramMemoryMap::SlabHeapEnd};
        constexpr PAddr system_end_address{applet_start_address};
        return {application_start_address, application_end_address, applet_start_address,
                applet_end_address, system_start_address, system_end_address};
    }
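
    // Illustrative carve-up (hypothetical DRAM end, used for the arithmetic
    // only): if Core::DramMemoryMap::End were 0x180000000, then
    //     application = [0xb2b00000, 0x180000000)  // End - 0xcd500000
    //     applet      = [0x93000000, 0xb2b00000)   // application start - 0x1fb00000
    //     system      = [SlabHeapEnd, 0x93000000)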

private:
    // Note: each pair of parameters forms a [start, end) region; the second
    // argument is an end address (as passed by GetDefaultLayout), not a size.
    constexpr MemoryLayout(PAddr application_start_address, PAddr application_end_address,
                           PAddr applet_start_address, PAddr applet_end_address,
                           PAddr system_start_address, PAddr system_end_address)
        : application{application_start_address, application_end_address},
          applet{applet_start_address, applet_end_address},
          system{system_start_address, system_end_address} {}

    const MemoryRegion application;
    const MemoryRegion applet;
    const MemoryRegion system;
};

} // namespace Kernel::Memory
175
src/core/hle/kernel/memory/memory_manager.cpp
Executable file
@@ -0,0 +1,175 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/kernel/memory/page_linked_list.h"

namespace Kernel::Memory {

std::size_t MemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
    const auto size{end_address - start_address};

    // Calculate metadata sizes
    const auto ref_count_size{(size / PageSize) * sizeof(u16)};
    const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)};
    const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)};
    const auto page_heap_size{PageHeap::CalculateMetadataOverheadSize(size)};
    const auto total_metadata_size{manager_size + page_heap_size};
    ASSERT(manager_size <= total_metadata_size);
    ASSERT(Common::IsAligned(total_metadata_size, PageSize));

    // Setup region
    pool = new_pool;

    // Initialize the manager's KPageHeap
    heap.Initialize(start_address, size, page_heap_size);

    // Free the memory to the heap
    heap.Free(start_address, size / PageSize);

    // Update the heap's used size
    heap.UpdateUsedSize();

    return total_metadata_size;
}
void MemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
|
||||
ASSERT(pool < Pool::Count);
|
||||
managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address);
|
||||
}
|
||||
|
||||
VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
|
||||
Direction dir) {
|
||||
// Early return if we're allocating no pages
|
||||
if (num_pages == 0) {
|
||||
return {};
|
||||
}
|
||||
|
||||
// Lock the pool that we're allocating from
|
||||
const auto pool_index{static_cast<std::size_t>(pool)};
|
||||
std::lock_guard lock{pool_locks[pool_index]};
|
||||
|
||||
// Choose a heap based on our page size request
|
||||
const s32 heap_index{PageHeap::GetAlignedBlockIndex(num_pages, align_pages)};
|
||||
|
||||
// Loop, trying to iterate from each block
|
||||
// TODO (bunnei): Support multiple managers
|
||||
Impl& chosen_manager{managers[pool_index]};
|
||||
VAddr allocated_block{chosen_manager.AllocateBlock(heap_index)};
|
||||
|
||||
// If we failed to allocate, quit now
|
||||
if (!allocated_block) {
|
||||
return {};
|
||||
}
|
||||
|
||||
// If we allocated more than we need, free some
|
||||
const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
|
||||
if (allocated_pages > num_pages) {
|
||||
chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
|
||||
}
|
||||
|
||||
return allocated_block;
|
||||
}
|
||||
|
||||
ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
|
||||
Direction dir) {
|
||||
ASSERT(page_list.GetNumPages() == 0);
|
||||
|
||||
// Early return if we're allocating no pages
|
||||
if (num_pages == 0) {
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
// Lock the pool that we're allocating from
|
||||
const auto pool_index{static_cast<std::size_t>(pool)};
|
||||
std::lock_guard lock{pool_locks[pool_index]};
|
||||
|
||||
// Choose a heap based on our page size request
|
||||
const s32 heap_index{PageHeap::GetBlockIndex(num_pages)};
|
||||
if (heap_index < 0) {
|
||||
return ERR_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
// TODO (bunnei): Support multiple managers
|
||||
Impl& chosen_manager{managers[pool_index]};
|
||||
|
||||
// Ensure that we don't leave anything un-freed
|
||||
auto group_guard = detail::ScopeExit([&] {
|
||||
for (const auto& it : page_list.Nodes()) {
|
||||
const auto min_num_pages{std::min<size_t>(
|
||||
it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
|
||||
chosen_manager.Free(it.GetAddress(), min_num_pages);
|
||||
}
|
||||
});
|
||||
|
||||
// Keep allocating until we've allocated all our pages
|
||||
for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
|
||||
const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)};
|
||||
|
||||
while (num_pages >= pages_per_alloc) {
|
||||
// Allocate a block
|
||||
VAddr allocated_block{chosen_manager.AllocateBlock(index)};
|
||||
if (!allocated_block) {
|
||||
break;
|
||||
}
|
||||
|
||||
// Safely add it to our group
|
||||
{
|
||||
auto block_guard = detail::ScopeExit(
|
||||
[&] { chosen_manager.Free(allocated_block, pages_per_alloc); });
|
||||
|
||||
if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)};
|
||||
result.IsError()) {
|
||||
return result;
|
||||
}
|
||||
|
||||
block_guard.Cancel();
|
||||
}
|
||||
|
||||
num_pages -= pages_per_alloc;
|
||||
}
|
||||
}
|
||||
|
||||
// Only succeed if we allocated as many pages as we wanted
|
||||
if (num_pages) {
|
||||
return ERR_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
// We succeeded!
|
||||
group_guard.Cancel();
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
|
||||
Direction dir) {
|
||||
// Early return if we're freeing no pages
|
||||
if (!num_pages) {
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
// Lock the pool that we're freeing from
|
||||
const auto pool_index{static_cast<std::size_t>(pool)};
|
||||
std::lock_guard lock{pool_locks[pool_index]};
|
||||
|
||||
// TODO (bunnei): Support multiple managers
|
||||
Impl& chosen_manager{managers[pool_index]};
|
||||
|
||||
// Free all of the pages
|
||||
for (const auto& it : page_list.Nodes()) {
|
||||
const auto min_num_pages{std::min<size_t>(
|
||||
it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
|
||||
chosen_manager.Free(it.GetAddress(), min_num_pages);
|
||||
}
|
||||
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
} // namespace Kernel::Memory
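
The loop at the heart of Allocate() is a greedy cover: starting from the chosen block class, it takes as many whole blocks as still fit and lets the smaller classes mop up the remainder. A standalone sketch of just that strategy, with assumed block sizes standing in for GetBlockNumPages():

// Standalone sketch (assumptions, not the emulator's API): greedy coverage of
// a page request using block sizes from largest to smallest.
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    // Block sizes in pages, largest first (mirrors GetBlockNumPages(index)).
    const std::vector<std::size_t> block_num_pages{16, 4, 1};
    std::size_t num_pages = 23; // the request

    std::vector<std::size_t> taken;
    for (std::size_t pages_per_alloc : block_num_pages) {
        while (num_pages >= pages_per_alloc) {
            taken.push_back(pages_per_alloc); // stands in for AllocateBlock()
            num_pages -= pages_per_alloc;
        }
    }

    // 23 pages decompose as 16 + 4 + 1 + 1 + 1.
    for (std::size_t n : taken) {
        std::cout << n << ' ';
    }
    std::cout << '\n';
    return 0;
}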
96
src/core/hle/kernel/memory/memory_manager.h
Executable file
@@ -0,0 +1,96 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <mutex>

#include "common/common_types.h"
#include "core/hle/kernel/memory/page_heap.h"
#include "core/hle/result.h"

namespace Kernel::Memory {

class PageLinkedList;

class MemoryManager final : NonCopyable {
public:
    enum class Pool : u32 {
        Application = 0,
        Applet = 1,
        System = 2,
        SystemNonSecure = 3,

        Count,

        Shift = 4,
        Mask = (0xF << Shift),
    };

    enum class Direction : u32 {
        FromFront = 0,
        FromBack = 1,

        Shift = 0,
        Mask = (0xF << Shift),
    };

    MemoryManager() = default;

    constexpr std::size_t GetSize(Pool pool) const {
        return managers[static_cast<std::size_t>(pool)].GetSize();
    }

    void InitializeManager(Pool pool, u64 start_address, u64 end_address);
    VAddr AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
                             Direction dir = Direction::FromFront);
    ResultCode Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                        Direction dir = Direction::FromFront);
    ResultCode Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                    Direction dir = Direction::FromFront);

    static constexpr std::size_t MaxManagerCount = 10;

private:
    class Impl final : NonCopyable {
    private:
        using RefCount = u16;

    private:
        PageHeap heap;
        Pool pool{};

    public:
        Impl() = default;

        std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address);

        VAddr AllocateBlock(s32 index) {
            return heap.AllocateBlock(index);
        }

        void Free(VAddr addr, std::size_t num_pages) {
            heap.Free(addr, num_pages);
        }

        constexpr std::size_t GetSize() const {
            return heap.GetSize();
        }

        constexpr VAddr GetAddress() const {
            return heap.GetAddress();
        }

        constexpr VAddr GetEndAddress() const {
            return heap.GetEndAddress();
        }
    };

private:
    std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks;
    std::array<Impl, MaxManagerCount> managers;
};

} // namespace Kernel::Memory
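
The header above pairs each pool with its own mutex, so work in one pool never blocks another. A minimal standalone sketch of that per-pool locking pattern, using only the standard library:

// Standalone sketch of the per-pool lock array pattern used by MemoryManager.
#include <array>
#include <cstddef>
#include <mutex>

enum class Pool : std::size_t { Application, Applet, System, SystemNonSecure, Count };

std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks;

void WithPoolLocked(Pool pool) {
    // Scoped lock over just the pool being touched; other pools stay available.
    std::lock_guard lock{pool_locks[static_cast<std::size_t>(pool)]};
    // ... allocate or free within this pool ...
}

int main() {
    WithPoolLocked(Pool::Application);
    return 0;
}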
18
src/core/hle/kernel/memory/memory_types.h
Executable file
@@ -0,0 +1,18 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>

#include "common/common_types.h"

namespace Kernel::Memory {

constexpr std::size_t PageBits{12};
constexpr std::size_t PageSize{1 << PageBits};

using Page = std::array<u8, PageSize>;

} // namespace Kernel::Memory
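
With PageBits set to 12, PageSize is 4 KiB, and page rounding reduces to bit masking, which is what Common::AlignUp/AlignDown do throughout these files. A quick standalone check:

// Standalone sanity check of the page constants and page-boundary masking.
#include <cstddef>

constexpr std::size_t PageBits{12};
constexpr std::size_t PageSize{1 << PageBits};

static_assert(PageSize == 4096);
static_assert((0x1234 & ~(PageSize - 1)) == 0x1000);                  // round down
static_assert(((0x1234 + PageSize - 1) & ~(PageSize - 1)) == 0x2000); // round up

int main() {
    return 0;
}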
119
src/core/hle/kernel/memory/page_heap.cpp
Executable file
@@ -0,0 +1,119 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#include "core/core.h"
#include "core/hle/kernel/memory/page_heap.h"
#include "core/memory.h"

namespace Kernel::Memory {

void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
    // Check our assumptions
    ASSERT(Common::IsAligned(address, PageSize));
    ASSERT(Common::IsAligned(size, PageSize));

    // Set our members
    heap_address = address;
    heap_size = size;

    // Setup bitmaps
    metadata.resize(metadata_size / sizeof(u64));
    u64* cur_bitmap_storage{metadata.data()};
    for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
        const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
        const std::size_t next_block_shift{
            (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
        cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift,
                                                  next_block_shift, cur_bitmap_storage);
    }
}

VAddr PageHeap::AllocateBlock(s32 index) {
    const std::size_t needed_size{blocks[index].GetSize()};

    for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
        if (const VAddr addr{blocks[i].PopBlock()}; addr) {
            if (const std::size_t allocated_size{blocks[i].GetSize()};
                allocated_size > needed_size) {
                Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
            }
            return addr;
        }
    }

    return 0;
}

void PageHeap::FreeBlock(VAddr block, s32 index) {
    do {
        block = blocks[index++].PushBlock(block);
    } while (block != 0);
}

void PageHeap::Free(VAddr addr, std::size_t num_pages) {
    // Freeing no pages is a no-op
    if (num_pages == 0) {
        return;
    }

    // Find the largest block size that we can free, and free as many as possible
    s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1};
    const VAddr start{addr};
    const VAddr end{(num_pages * PageSize) + addr};
    VAddr before_start{start};
    VAddr before_end{start};
    VAddr after_start{end};
    VAddr after_end{end};
    while (big_index >= 0) {
        const std::size_t block_size{blocks[big_index].GetSize()};
        const VAddr big_start{Common::AlignUp(start, block_size)};
        const VAddr big_end{Common::AlignDown(end, block_size)};
        if (big_start < big_end) {
            // Free as many big blocks as we can
            for (auto block{big_start}; block < big_end; block += block_size) {
                FreeBlock(block, big_index);
            }
            before_end = big_start;
            after_start = big_end;
            break;
        }
        big_index--;
    }
    ASSERT(big_index >= 0);

    // Free space before the big blocks
    for (s32 i{big_index - 1}; i >= 0; i--) {
        const std::size_t block_size{blocks[i].GetSize()};
        while (before_start + block_size <= before_end) {
            before_end -= block_size;
            FreeBlock(before_end, i);
        }
    }

    // Free space after the big blocks
    for (s32 i{big_index - 1}; i >= 0; i--) {
        const std::size_t block_size{blocks[i].GetSize()};
        while (after_start + block_size <= after_end) {
            FreeBlock(after_start, i);
            after_start += block_size;
        }
    }
}

std::size_t PageHeap::CalculateMetadataOverheadSize(std::size_t region_size) {
    std::size_t overhead_size = 0;
    for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
        const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
        const std::size_t next_block_shift{
            (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
        overhead_size += PageHeap::Block::CalculateMetadataOverheadSize(
            region_size, cur_block_shift, next_block_shift);
    }
    return Common::AlignUp(overhead_size, PageSize);
}

} // namespace Kernel::Memory
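
Free() above decomposes an arbitrary page range into power-of-two blocks: it places as many of the largest aligned blocks as fit, then covers the unaligned head and tail with smaller ones. A standalone illustration of that decomposition with two assumed block sizes (64 KiB and 4 KiB):

// Standalone illustration of the range decomposition strategy in Free().
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    using VAddr = std::uint64_t;
    const std::vector<std::uint64_t> block_sizes{0x10000, 0x1000}; // 64 KiB, 4 KiB (assumed)
    const VAddr start = 0x3000;
    const VAddr end = 0x23000;

    auto align_up = [](VAddr v, std::uint64_t a) { return (v + a - 1) & ~(a - 1); };
    auto align_down = [](VAddr v, std::uint64_t a) { return v & ~(a - 1); };

    // The largest block class that fits aligned inside [start, end).
    const VAddr big_start = align_up(start, block_sizes[0]);  // 0x10000
    const VAddr big_end = align_down(end, block_sizes[0]);    // 0x20000
    for (VAddr block = big_start; block < big_end; block += block_sizes[0]) {
        std::cout << "free 64KiB block at 0x" << std::hex << block << '\n';
    }
    // Head [0x3000, 0x10000) and tail [0x20000, 0x23000) fall back to 4 KiB blocks.
    for (VAddr block = start; block < big_start; block += block_sizes[1]) {
        std::cout << "free 4KiB block at 0x" << std::hex << block << '\n';
    }
    for (VAddr block = big_end; block < end; block += block_sizes[1]) {
        std::cout << "free 4KiB block at 0x" << std::hex << block << '\n';
    }
    return 0;
}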
370
src/core/hle/kernel/memory/page_heap.h
Executable file
@@ -0,0 +1,370 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include <algorithm>
#include <array>
#include <vector>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"

namespace Kernel::Memory {

class PageHeap final : NonCopyable {
public:
    static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
        const auto target_pages{std::max(num_pages, align_pages)};
        for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
            if (target_pages <=
                (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                return static_cast<s32>(i);
            }
        }
        return -1;
    }

    static constexpr s32 GetBlockIndex(std::size_t num_pages) {
        for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
            if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                return i;
            }
        }
        return -1;
    }

    static constexpr std::size_t GetBlockSize(std::size_t index) {
        return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
    }

    static constexpr std::size_t GetBlockNumPages(std::size_t index) {
        return GetBlockSize(index) / PageSize;
    }

private:
    static constexpr std::size_t NumMemoryBlockPageShifts{7};
    static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
        0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
    };

    class Block final : NonCopyable {
    private:
        class Bitmap final : NonCopyable {
        public:
            static constexpr std::size_t MaxDepth{4};

        private:
            std::array<u64*, MaxDepth> bit_storages{};
            std::size_t num_bits{};
            std::size_t used_depths{};

        public:
            constexpr Bitmap() = default;

            constexpr std::size_t GetNumBits() const {
                return num_bits;
            }
            constexpr s32 GetHighestDepthIndex() const {
                return static_cast<s32>(used_depths) - 1;
            }

            constexpr u64* Initialize(u64* storage, std::size_t size) {
                // Initially, everything is unset
                num_bits = 0;

                // Calculate the needed bitmap depth
                used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
                ASSERT(used_depths <= MaxDepth);

                // Set the bitmap pointers
                for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
                    bit_storages[depth] = storage;
                    size = Common::AlignUp(size, 64) / 64;
                    storage += size;
                }

                return storage;
            }

            s64 FindFreeBlock() const {
                uintptr_t offset{};
                s32 depth{};

                do {
                    const u64 v{bit_storages[depth][offset]};
                    if (v == 0) {
                        // Non-zero depth indicates that a previous level had a free block
                        ASSERT(depth == 0);
                        return -1;
                    }
                    offset = offset * 64 + Common::CountTrailingZeroes64(v);
                    ++depth;
                } while (depth < static_cast<s32>(used_depths));

                return static_cast<s64>(offset);
            }

            constexpr void SetBit(std::size_t offset) {
                SetBit(GetHighestDepthIndex(), offset);
                num_bits++;
            }

            constexpr void ClearBit(std::size_t offset) {
                ClearBit(GetHighestDepthIndex(), offset);
                num_bits--;
            }

            constexpr bool ClearRange(std::size_t offset, std::size_t count) {
                const s32 depth{GetHighestDepthIndex()};
                const auto bit_ind{offset / 64};
                u64* bits{bit_storages[depth]};
                if (count < 64) {
                    const auto shift{offset % 64};
                    ASSERT(shift + count <= 64);
                    // Check that all the bits are set
                    const u64 mask{((1ULL << count) - 1) << shift};
                    u64 v{bits[bit_ind]};
                    if ((v & mask) != mask) {
                        return false;
                    }

                    // Clear the bits
                    v &= ~mask;
                    bits[bit_ind] = v;
                    if (v == 0) {
                        ClearBit(depth - 1, bit_ind);
                    }
                } else {
                    ASSERT(offset % 64 == 0);
                    ASSERT(count % 64 == 0);
                    // Check that all the bits are set
                    std::size_t remaining{count};
                    std::size_t i = 0;
                    do {
                        if (bits[bit_ind + i++] != ~u64(0)) {
                            return false;
                        }
                        remaining -= 64;
                    } while (remaining > 0);

                    // Clear the bits
                    remaining = count;
                    i = 0;
                    do {
                        bits[bit_ind + i] = 0;
                        ClearBit(depth - 1, bit_ind + i);
                        i++;
                        remaining -= 64;
                    } while (remaining > 0);
                }

                num_bits -= count;
                return true;
            }

        private:
            constexpr void SetBit(s32 depth, std::size_t offset) {
                while (depth >= 0) {
                    const auto ind{offset / 64};
                    const auto which{offset % 64};
                    const u64 mask{1ULL << which};

                    u64* bit{std::addressof(bit_storages[depth][ind])};
                    const u64 v{*bit};
                    ASSERT((v & mask) == 0);
                    *bit = v | mask;
                    if (v) {
                        break;
                    }
                    offset = ind;
                    depth--;
                }
            }

            constexpr void ClearBit(s32 depth, std::size_t offset) {
                while (depth >= 0) {
                    const auto ind{offset / 64};
                    const auto which{offset % 64};
                    const u64 mask{1ULL << which};

                    u64* bit{std::addressof(bit_storages[depth][ind])};
                    u64 v{*bit};
                    ASSERT((v & mask) != 0);
                    v &= ~mask;
                    *bit = v;
                    if (v) {
                        break;
                    }
                    offset = ind;
                    depth--;
                }
            }

        private:
            static constexpr s32 GetRequiredDepth(std::size_t region_size) {
                s32 depth = 0;
                while (true) {
                    region_size /= 64;
                    depth++;
                    if (region_size == 0) {
                        return depth;
                    }
                }
            }

        public:
            static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size) {
                std::size_t overhead_bits = 0;
                for (s32 depth{GetRequiredDepth(region_size) - 1}; depth >= 0; depth--) {
                    region_size = Common::AlignUp(region_size, 64) / 64;
                    overhead_bits += region_size;
                }
                return overhead_bits * sizeof(u64);
            }
        };

    private:
        Bitmap bitmap;
        VAddr heap_address{};
        uintptr_t end_offset{};
        std::size_t block_shift{};
        std::size_t next_block_shift{};

    public:
        constexpr Block() = default;

        constexpr std::size_t GetShift() const {
            return block_shift;
        }
        constexpr std::size_t GetNextShift() const {
            return next_block_shift;
        }
        constexpr std::size_t GetSize() const {
            return static_cast<std::size_t>(1) << GetShift();
        }
        constexpr std::size_t GetNumPages() const {
            return GetSize() / PageSize;
        }
        constexpr std::size_t GetNumFreeBlocks() const {
            return bitmap.GetNumBits();
        }
        constexpr std::size_t GetNumFreePages() const {
            return GetNumFreeBlocks() * GetNumPages();
        }

        constexpr u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
                                  u64* bit_storage) {
            // Set shifts
            block_shift = bs;
            next_block_shift = nbs;

            // Align up the address
            VAddr end{addr + size};
            const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
                                                     : (1ULL << block_shift)};
            addr = Common::AlignDown(addr, align);
            end = Common::AlignUp(end, align);

            heap_address = addr;
            end_offset = (end - addr) / (1ULL << block_shift);
            return bitmap.Initialize(bit_storage, end_offset);
        }

        constexpr VAddr PushBlock(VAddr address) {
            // Set the bit for the free block
            std::size_t offset{(address - heap_address) >> GetShift()};
            bitmap.SetBit(offset);

            // If we have a next shift, try to clear the blocks below and return the address
            if (GetNextShift()) {
                const auto diff{1ULL << (GetNextShift() - GetShift())};
                offset = Common::AlignDown(offset, diff);
                if (bitmap.ClearRange(offset, diff)) {
                    return heap_address + (offset << GetShift());
                }
            }

            // We couldn't coalesce, or we're already as big as possible
            return 0;
        }

        VAddr PopBlock() {
            // Find a free block
            const s64 soffset{bitmap.FindFreeBlock()};
            if (soffset < 0) {
                return 0;
            }
            const auto offset{static_cast<std::size_t>(soffset)};

            // Update our tracking and return it
            bitmap.ClearBit(offset);
            return heap_address + (offset << GetShift());
        }

    public:
        static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size,
                                                                   std::size_t cur_block_shift,
                                                                   std::size_t next_block_shift) {
            const auto cur_block_size{(1ULL << cur_block_shift)};
            const auto next_block_size{(1ULL << next_block_shift)};
            const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
            return Bitmap::CalculateMetadataOverheadSize(
                (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
        }
    };

public:
    PageHeap() = default;

    constexpr VAddr GetAddress() const {
        return heap_address;
    }
    constexpr std::size_t GetSize() const {
        return heap_size;
    }
    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();
    }
    constexpr std::size_t GetPageOffset(VAddr block) const {
        return (block - GetAddress()) / PageSize;
    }

    void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
    VAddr AllocateBlock(s32 index);
    void Free(VAddr addr, std::size_t num_pages);

    void UpdateUsedSize() {
        used_size = heap_size - (GetNumFreePages() * PageSize);
    }

    static std::size_t CalculateMetadataOverheadSize(std::size_t region_size);

private:
    constexpr std::size_t GetNumFreePages() const {
        std::size_t num_free{};

        for (const auto& block : blocks) {
            num_free += block.GetNumFreePages();
        }

        return num_free;
    }

    void FreeBlock(VAddr block, s32 index);

    VAddr heap_address{};
    std::size_t heap_size{};
    std::size_t used_size{};
    std::array<Block, NumMemoryBlockPageShifts> blocks{};
    std::vector<u64> metadata;
};

} // namespace Kernel::Memory
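
The shift table above defines the block classes (4 KiB, 64 KiB, 2 MiB, ...), and GetAlignedBlockIndex() picks the smallest class that can hold the request. A standalone re-derivation with a few spot checks; note how a 17-page request spills past the 16-page 64 KiB class into the 2 MiB class:

// Standalone re-derivation of the block-index selection, same table as above.
#include <array>
#include <cstddef>

constexpr std::size_t PageSize{0x1000};
constexpr std::array<std::size_t, 7> MemoryBlockPageShifts{0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E};

constexpr int GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
    const std::size_t target_pages = num_pages > align_pages ? num_pages : align_pages;
    for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
        if (target_pages <= (std::size_t{1} << MemoryBlockPageShifts[i]) / PageSize) {
            return static_cast<int>(i);
        }
    }
    return -1;
}

static_assert(GetAlignedBlockIndex(1, 1) == 0);  // one page fits the 4 KiB class
static_assert(GetAlignedBlockIndex(16, 1) == 1); // 16 pages = 64 KiB exactly
static_assert(GetAlignedBlockIndex(17, 1) == 2); // 17 pages spill to the 2 MiB class

int main() {
    return 0;
}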
92
src/core/hle/kernel/memory/page_linked_list.h
Executable file
@@ -0,0 +1,92 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <list>

#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/result.h"

namespace Kernel::Memory {

class PageLinkedList final {
public:
    class Node final {
    public:
        constexpr Node(u64 addr, std::size_t num_pages) : addr{addr}, num_pages{num_pages} {}

        constexpr u64 GetAddress() const {
            return addr;
        }

        constexpr std::size_t GetNumPages() const {
            return num_pages;
        }

    private:
        u64 addr{};
        std::size_t num_pages{};
    };

public:
    PageLinkedList() = default;
    PageLinkedList(u64 address, u64 num_pages) {
        ASSERT(AddBlock(address, num_pages).IsSuccess());
    }

    constexpr std::list<Node>& Nodes() {
        return nodes;
    }

    constexpr const std::list<Node>& Nodes() const {
        return nodes;
    }

    std::size_t GetNumPages() const {
        std::size_t num_pages = 0;
        for (const Node& node : nodes) {
            num_pages += node.GetNumPages();
        }
        return num_pages;
    }

    bool IsEqual(PageLinkedList& other) const {
        auto this_node = nodes.begin();
        auto other_node = other.nodes.begin();
        while (this_node != nodes.end() && other_node != other.nodes.end()) {
            if (this_node->GetAddress() != other_node->GetAddress() ||
                this_node->GetNumPages() != other_node->GetNumPages()) {
                return false;
            }
            this_node = std::next(this_node);
            other_node = std::next(other_node);
        }

        return this_node == nodes.end() && other_node == other.nodes.end();
    }

    ResultCode AddBlock(u64 address, u64 num_pages) {
        if (!num_pages) {
            return RESULT_SUCCESS;
        }
        if (!nodes.empty()) {
            const auto node = nodes.back();
            if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
                address = node.GetAddress();
                num_pages += node.GetNumPages();
                nodes.pop_back();
            }
        }
        nodes.push_back({address, num_pages});
        return RESULT_SUCCESS;
    }

private:
    std::list<Node> nodes;
};

} // namespace Kernel::Memory
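
AddBlock() above coalesces: a block that begins exactly where the last node ends is merged into that node instead of being appended. A standalone sketch of the same rule with simplified stand-in types:

// Standalone sketch of AddBlock()'s coalescing rule, simplified types.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <list>

struct Node {
    std::uint64_t addr;
    std::size_t num_pages;
};

constexpr std::size_t PageSize{0x1000};

void AddBlock(std::list<Node>& nodes, std::uint64_t address, std::size_t num_pages) {
    if (!nodes.empty()) {
        const Node node = nodes.back();
        if (node.addr + node.num_pages * PageSize == address) {
            // Contiguous with the previous node: extend it instead of appending.
            address = node.addr;
            num_pages += node.num_pages;
            nodes.pop_back();
        }
    }
    nodes.push_back({address, num_pages});
}

int main() {
    std::list<Node> nodes;
    AddBlock(nodes, 0x10000, 4); // [0x10000, 0x14000)
    AddBlock(nodes, 0x14000, 2); // contiguous -> merged into one 6-page node
    AddBlock(nodes, 0x20000, 1); // gap -> new node
    std::cout << nodes.size() << '\n'; // prints 2
    return 0;
}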
1179
src/core/hle/kernel/memory/page_table.cpp
Executable file
File diff suppressed because it is too large
277
src/core/hle/kernel/memory/page_table.h
Executable file
@@ -0,0 +1,277 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <mutex>

#include "common/common_types.h"
#include "common/page_table.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/result.h"

namespace Core {
class System;
}

namespace Kernel::Memory {

class MemoryBlockManager;

class PageTable final : NonCopyable {
public:
    explicit PageTable(Core::System& system);

    ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
                                    VAddr code_addr, std::size_t code_size,
                                    Memory::MemoryManager::Pool pool);
    ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, MemoryState state,
                              MemoryPermission perm);
    ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
    ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
    ResultCode UnmapMemory(VAddr addr, std::size_t size);
    ResultCode Map(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state,
                        MemoryPermission perm);
    ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, MemoryPermission perm);
    MemoryInfo QueryInfo(VAddr addr);
    ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm);
    ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
    ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask,
                                  MemoryAttribute value);
    ResultCode SetHeapCapacity(std::size_t new_heap_capacity);
    ResultVal<VAddr> SetHeapSize(std::size_t size);
    ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
                                          bool is_map_only, VAddr region_start,
                                          std::size_t region_num_pages, MemoryState state,
                                          MemoryPermission perm, PAddr map_addr = 0);
    ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
    ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);

    Common::PageTable& PageTableImpl() {
        return page_table_impl;
    }

    const Common::PageTable& PageTableImpl() const {
        return page_table_impl;
    }

private:
    enum class OperationType : u32 {
        Map,
        MapGroup,
        Unmap,
        ChangePermissions,
        ChangePermissionsAndRefresh,
    };

    static constexpr MemoryAttribute DefaultMemoryIgnoreAttr =
        MemoryAttribute::DontCareMask | MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared;

    ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
    ResultCode MapPages(VAddr addr, const PageLinkedList& page_linked_list, MemoryPermission perm);
    void MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start, VAddr end);
    bool IsRegionMapped(VAddr address, u64 size);
    bool IsRegionContiguous(VAddr addr, u64 size) const;
    void AddRegionToPages(VAddr start, std::size_t num_pages, PageLinkedList& page_linked_list);
    MemoryInfo QueryInfoImpl(VAddr addr);
    VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
                                std::size_t align);
    ResultCode Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group,
                       OperationType operation);
    ResultCode Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm,
                       OperationType operation, PAddr map_addr = 0);
    constexpr VAddr GetRegionAddress(MemoryState state) const;
    constexpr std::size_t GetRegionSize(MemoryState state) const;
    constexpr bool CanContain(VAddr addr, std::size_t size, MemoryState state) const;

    constexpr ResultCode CheckMemoryState(const MemoryInfo& info, MemoryState state_mask,
                                          MemoryState state, MemoryPermission perm_mask,
                                          MemoryPermission perm, MemoryAttribute attr_mask,
                                          MemoryAttribute attr) const;
    ResultCode CheckMemoryState(MemoryState* out_state, MemoryPermission* out_perm,
                                MemoryAttribute* out_attr, VAddr addr, std::size_t size,
                                MemoryState state_mask, MemoryState state,
                                MemoryPermission perm_mask, MemoryPermission perm,
                                MemoryAttribute attr_mask, MemoryAttribute attr,
                                MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr);
    ResultCode CheckMemoryState(VAddr addr, std::size_t size, MemoryState state_mask,
                                MemoryState state, MemoryPermission perm_mask,
                                MemoryPermission perm, MemoryAttribute attr_mask,
                                MemoryAttribute attr,
                                MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) {
        return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask,
                                perm, attr_mask, attr, ignore_attr);
    }

    std::recursive_mutex page_table_lock;
    std::unique_ptr<MemoryBlockManager> block_manager;

public:
    constexpr VAddr GetAddressSpaceStart() const {
        return address_space_start;
    }
    constexpr VAddr GetAddressSpaceEnd() const {
        return address_space_end;
    }
    constexpr std::size_t GetAddressSpaceSize() const {
        return address_space_end - address_space_start;
    }
    constexpr VAddr GetHeapRegionStart() const {
        return heap_region_start;
    }
    constexpr VAddr GetHeapRegionEnd() const {
        return heap_region_end;
    }
    constexpr std::size_t GetHeapRegionSize() const {
        return heap_region_end - heap_region_start;
    }
    constexpr VAddr GetAliasRegionStart() const {
        return alias_region_start;
    }
    constexpr VAddr GetAliasRegionEnd() const {
        return alias_region_end;
    }
    constexpr std::size_t GetAliasRegionSize() const {
        return alias_region_end - alias_region_start;
    }
    constexpr VAddr GetStackRegionStart() const {
        return stack_region_start;
    }
    constexpr VAddr GetStackRegionEnd() const {
        return stack_region_end;
    }
    constexpr std::size_t GetStackRegionSize() const {
        return stack_region_end - stack_region_start;
    }
    constexpr VAddr GetKernelMapRegionStart() const {
        return kernel_map_region_start;
    }
    constexpr VAddr GetKernelMapRegionEnd() const {
        return kernel_map_region_end;
    }
    constexpr VAddr GetCodeRegionStart() const {
        return code_region_start;
    }
    constexpr VAddr GetCodeRegionEnd() const {
        return code_region_end;
    }
    constexpr VAddr GetAliasCodeRegionStart() const {
        return alias_code_region_start;
    }
    constexpr VAddr GetAliasCodeRegionSize() const {
        return alias_code_region_end - alias_code_region_start;
    }
    constexpr std::size_t GetAddressSpaceWidth() const {
        return address_space_width;
    }
    constexpr std::size_t GetHeapSize() {
        return current_heap_addr - heap_region_start;
    }
    constexpr std::size_t GetTotalHeapSize() {
        return GetHeapSize() + physical_memory_usage;
    }
    constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
        return address_space_start <= address && address + size - 1 <= address_space_end - 1;
    }
    constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const {
        return alias_region_start > address || address + size - 1 > alias_region_end - 1;
    }
    constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const {
        return stack_region_start > address || address + size - 1 > stack_region_end - 1;
    }
    constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const {
        return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
    }
    constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const {
        return address + size > heap_region_start && heap_region_end > address;
    }
    constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const {
        return address + size > alias_region_start && alias_region_end > address;
    }
    constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const {
        if (IsInvalidRegion(address, size)) {
            return true;
        }
        if (IsInsideHeapRegion(address, size)) {
            return true;
        }
        if (IsInsideAliasRegion(address, size)) {
            return true;
        }
        return {};
    }
    constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
        return !IsOutsideASLRRegion(address, size);
    }
    constexpr PAddr GetPhysicalAddr(VAddr addr) {
        return page_table_impl.backing_addr[addr >> Memory::PageBits] + addr;
    }

private:
    constexpr bool Contains(VAddr addr) const {
        return address_space_start <= addr && addr <= address_space_end - 1;
    }
    constexpr bool Contains(VAddr addr, std::size_t size) const {
        return address_space_start <= addr && addr < addr + size &&
               addr + size - 1 <= address_space_end - 1;
    }
    constexpr bool IsKernel() const {
        return is_kernel;
    }
    constexpr bool IsAslrEnabled() const {
        return is_aslr_enabled;
    }

    constexpr std::size_t GetNumGuardPages() const {
        return IsKernel() ? 1 : 4;
    }

    constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
        return (address_space_start <= addr) &&
               (num_pages <= (address_space_end - address_space_start) / PageSize) &&
               (addr + num_pages * PageSize - 1 <= address_space_end - 1);
    }

private:
    VAddr address_space_start{};
    VAddr address_space_end{};
    VAddr heap_region_start{};
    VAddr heap_region_end{};
    VAddr current_heap_end{};
    VAddr alias_region_start{};
    VAddr alias_region_end{};
    VAddr stack_region_start{};
    VAddr stack_region_end{};
    VAddr kernel_map_region_start{};
    VAddr kernel_map_region_end{};
    VAddr code_region_start{};
    VAddr code_region_end{};
    VAddr alias_code_region_start{};
    VAddr alias_code_region_end{};
    VAddr current_heap_addr{};

    std::size_t heap_capacity{};
    std::size_t physical_memory_usage{};
    std::size_t max_heap_size{};
    std::size_t max_physical_memory_size{};
    std::size_t address_space_width{};

    bool is_kernel{};
    bool is_aslr_enabled{};

    MemoryManager::Pool memory_pool{MemoryManager::Pool::Application};

    Common::PageTable page_table_impl;

    Core::System& system;
};

} // namespace Kernel::Memory
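
Two predicate shapes recur in the header above: the IsInside*Region checks are half-open overlap tests, while IsInsideAddressSpace demands full containment, and both subtract 1 before comparing so that a range ending exactly at the top of the address space does not overflow. A standalone sketch of both shapes:

// Standalone sketch of the interval predicates used by PageTable above.
#include <cstddef>
#include <cstdint>

using VAddr = std::uint64_t;

constexpr bool Overlaps(VAddr address, std::size_t size, VAddr region_start, VAddr region_end) {
    // Half-open overlap: [address, address + size) intersects [region_start, region_end).
    return address + size > region_start && region_end > address;
}

constexpr bool Contains(VAddr start, VAddr end, VAddr address, std::size_t size) {
    // Full containment, with "- 1" to stay safe at the very top of the space.
    return start <= address && address + size - 1 <= end - 1;
}

static_assert(Overlaps(0x1000, 0x1000, 0x1800, 0x2000)); // partial overlap counts
static_assert(!Overlaps(0x1000, 0x800, 0x1800, 0x2000)); // touching at the edge does not
static_assert(Contains(0x0, 0x10000, 0xF000, 0x1000));   // flush with the end is fine

int main() {
    return 0;
}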
163
src/core/hle/kernel/memory/slab_heap.h
Executable file
@@ -0,0 +1,163 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include <atomic>
#include <new>

#include "common/assert.h"
#include "common/common_types.h"

namespace Kernel::Memory {

namespace impl {

class SlabHeapImpl final : NonCopyable {
public:
    struct Node {
        Node* next{};
    };

    constexpr SlabHeapImpl() = default;

    void Initialize(std::size_t size) {
        ASSERT(head == nullptr);
        obj_size = size;
    }

    constexpr std::size_t GetObjectSize() const {
        return obj_size;
    }

    Node* GetHead() const {
        return head;
    }

    void* Allocate() {
        Node* ret = head.load();

        do {
            if (ret == nullptr) {
                break;
            }
        } while (!head.compare_exchange_weak(ret, ret->next));

        return ret;
    }

    void Free(void* obj) {
        Node* node = static_cast<Node*>(obj);

        Node* cur_head = head.load();
        do {
            node->next = cur_head;
        } while (!head.compare_exchange_weak(cur_head, node));
    }

private:
    std::atomic<Node*> head{};
    std::size_t obj_size{};
};

} // namespace impl

class SlabHeapBase : NonCopyable {
public:
    constexpr SlabHeapBase() = default;

    constexpr bool Contains(uintptr_t addr) const {
        return start <= addr && addr < end;
    }

    constexpr std::size_t GetSlabHeapSize() const {
        return (end - start) / GetObjectSize();
    }

    constexpr std::size_t GetObjectSize() const {
        return impl.GetObjectSize();
    }

    constexpr uintptr_t GetSlabHeapAddress() const {
        return start;
    }

    std::size_t GetObjectIndexImpl(const void* obj) const {
        return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
    }

    std::size_t GetPeakIndex() const {
        return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
    }

    void* AllocateImpl() {
        return impl.Allocate();
    }

    void FreeImpl(void* obj) {
        // Don't allow freeing an object that wasn't allocated from this heap
        ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
        impl.Free(obj);
    }

    void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
        // Ensure we don't initialize a slab using null memory
        ASSERT(memory != nullptr);

        // Initialize the base allocator
        impl.Initialize(obj_size);

        // Set our tracking variables
        const std::size_t num_obj = (memory_size / obj_size);
        start = reinterpret_cast<uintptr_t>(memory);
        end = start + num_obj * obj_size;
        peak = start;

        // Free the objects
        u8* cur = reinterpret_cast<u8*>(end);

        for (std::size_t i{}; i < num_obj; i++) {
            cur -= obj_size;
            impl.Free(cur);
        }
    }

private:
    using Impl = impl::SlabHeapImpl;

    Impl impl;
    uintptr_t peak{};
    uintptr_t start{};
    uintptr_t end{};
};

template <typename T>
class SlabHeap final : public SlabHeapBase {
public:
    constexpr SlabHeap() : SlabHeapBase() {}

    void Initialize(void* memory, std::size_t memory_size) {
        InitializeImpl(sizeof(T), memory, memory_size);
    }

    T* Allocate() {
        T* obj = static_cast<T*>(AllocateImpl());
        if (obj != nullptr) {
            new (obj) T();
        }
        return obj;
    }

    void Free(T* obj) {
        FreeImpl(obj);
    }

    constexpr std::size_t GetObjectIndex(const T* obj) const {
        return GetObjectIndexImpl(obj);
    }
};

} // namespace Kernel::Memory
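
A usage sketch of the slab pattern above with simplified stand-in types (not the kernel's): a flat buffer is pre-carved into equal objects that are threaded onto a lock-free free list, so Allocate() and Free() are a single compare-and-swap each. The real SlabHeap<T> additionally placement-news a T on allocation.

// Standalone, simplified analogue of SlabHeapImpl + InitializeImpl.
#include <atomic>
#include <cstddef>
#include <iostream>
#include <new>
#include <vector>

struct FreeNode {
    FreeNode* next{};
};

class TinySlab {
public:
    void Initialize(void* memory, std::size_t memory_size, std::size_t obj_size) {
        // Thread objects onto the free list back to front, as InitializeImpl does.
        const std::size_t num_obj = memory_size / obj_size;
        auto* cur = static_cast<unsigned char*>(memory) + num_obj * obj_size;
        for (std::size_t i = 0; i < num_obj; i++) {
            cur -= obj_size;
            Free(cur);
        }
    }
    void* Allocate() {
        FreeNode* ret = head.load();
        do {
            if (ret == nullptr) {
                break; // slab exhausted
            }
        } while (!head.compare_exchange_weak(ret, ret->next));
        return ret;
    }
    void Free(void* obj) {
        auto* node = ::new (obj) FreeNode{}; // treat the slot as a list node
        FreeNode* cur_head = head.load();
        do {
            node->next = cur_head;
        } while (!head.compare_exchange_weak(cur_head, node));
    }

private:
    std::atomic<FreeNode*> head{};
};

int main() {
    std::vector<unsigned char> backing(4096);
    TinySlab slab;
    slab.Initialize(backing.data(), backing.size(), 64); // 64 objects of 64 bytes
    void* a = slab.Allocate();
    void* b = slab.Allocate();
    std::cout << (a != b) << '\n'; // distinct slots: prints 1
    slab.Free(b);
    slab.Free(a);
    return 0;
}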
40
src/core/hle/kernel/memory/system_control.cpp
Executable file
@@ -0,0 +1,40 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <limits>
#include <random>

#include "core/hle/kernel/memory/system_control.h"

namespace Kernel::Memory::SystemControl {
namespace {
template <typename F>
u64 GenerateUniformRange(u64 min, u64 max, F f) {
    // Handle the case where the difference is too large to represent.
    if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
        return f();
    }

    // Iterate until we get a value in range.
    const u64 range_size = ((max + 1) - min);
    const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
    while (true) {
        if (const u64 rnd = f(); rnd < effective_max) {
            return min + (rnd % range_size);
        }
    }
}

u64 GenerateRandomU64ForInit() {
    static std::random_device device;
    static std::mt19937 gen(device());
    static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
    return distribution(gen);
}
} // Anonymous namespace

u64 GenerateRandomRange(u64 min, u64 max) {
    return GenerateUniformRange(min, max, GenerateRandomU64ForInit);
}

} // namespace Kernel::Memory::SystemControl
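
GenerateUniformRange() rejects raw samples at or above effective_max to avoid modulo bias: without the rejection, the final partial bucket of generator outputs would make small results slightly more likely. A tiny standalone analogue over an 8-bit generator makes the effect visible:

// Standalone illustration of the rejection step: exhaustively map all 256 raw
// values of a hypothetical 8-bit generator onto the range 0..9.
#include <cstdint>
#include <iostream>

int main() {
    const std::uint32_t range_size = 10;
    const std::uint32_t effective_max = (256 / range_size) * range_size; // 250

    std::uint32_t counts[10] = {};
    for (std::uint32_t raw = 0; raw < 256; raw++) {
        if (raw < effective_max) { // the rejection step; raw 250..255 are retried
            counts[raw % range_size]++;
        }
    }
    for (std::uint32_t c : counts) {
        std::cout << c << ' '; // every bucket prints 25 -- perfectly uniform
    }
    std::cout << '\n';
    return 0;
}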
13
src/core/hle/kernel/memory/system_control.h
Executable file
@@ -0,0 +1,13 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"

namespace Kernel::Memory::SystemControl {

u64 GenerateRandomRange(u64 min, u64 max);

} // namespace Kernel::Memory::SystemControl
170
src/core/hle/kernel/mutex.cpp
Executable file
@@ -0,0 +1,170 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <memory>
#include <utility>
#include <vector>

#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
#include "core/memory.h"

namespace Kernel {

/// Returns the highest-priority thread waiting on the mutex at the given address, along with the
/// total number of threads waiting on it.
static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
    const std::shared_ptr<Thread>& current_thread, VAddr mutex_addr) {

    std::shared_ptr<Thread> highest_priority_thread;
    u32 num_waiters = 0;

    for (const auto& thread : current_thread->GetMutexWaitingThreads()) {
        if (thread->GetMutexWaitAddress() != mutex_addr)
            continue;

        ++num_waiters;
        if (highest_priority_thread == nullptr ||
            thread->GetPriority() < highest_priority_thread->GetPriority()) {
            highest_priority_thread = thread;
        }
    }

    return {highest_priority_thread, num_waiters};
}

/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread,
                                   std::shared_ptr<Thread> new_owner) {
    current_thread->RemoveMutexWaiter(new_owner);
    const auto threads = current_thread->GetMutexWaitingThreads();
    for (const auto& thread : threads) {
        if (thread->GetMutexWaitAddress() != mutex_addr)
            continue;

        ASSERT(thread->GetLockOwner() == current_thread.get());
        current_thread->RemoveMutexWaiter(thread);
        if (new_owner != thread)
            new_owner->AddMutexWaiter(thread);
    }
}

Mutex::Mutex(Core::System& system) : system{system} {}
Mutex::~Mutex() = default;

ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
                             Handle requesting_thread_handle) {
    // The mutex address must be 4-byte aligned
    if ((address % sizeof(u32)) != 0) {
        LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
        return ERR_INVALID_ADDRESS;
    }

    auto& kernel = system.Kernel();
    std::shared_ptr<Thread> current_thread =
        SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
    {
        KScopedSchedulerLock lock(kernel);
        // The mutex address must be 4-byte aligned
        if ((address % sizeof(u32)) != 0) {
            return ERR_INVALID_ADDRESS;
        }

        const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
        std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
        std::shared_ptr<Thread> requesting_thread =
            handle_table.Get<Thread>(requesting_thread_handle);

        // TODO(Subv): It is currently unknown if it is possible to lock a mutex on behalf of
        // another thread.
        ASSERT(requesting_thread == current_thread);

        current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);

        const u32 addr_value = system.Memory().Read32(address);

        // If the mutex isn't being held, just return success.
        if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
            return RESULT_SUCCESS;
        }

        if (holding_thread == nullptr) {
            return ERR_INVALID_HANDLE;
        }

        // Wait until the mutex is released
        current_thread->SetMutexWaitAddress(address);
        current_thread->SetWaitHandle(requesting_thread_handle);

        current_thread->SetStatus(ThreadStatus::WaitMutex);

        // Update the lock holder thread's priority to prevent priority inversion.
        holding_thread->AddMutexWaiter(current_thread);
    }

    {
        KScopedSchedulerLock lock(kernel);
        auto* owner = current_thread->GetLockOwner();
        if (owner != nullptr) {
            owner->RemoveMutexWaiter(current_thread);
        }
    }
    return current_thread->GetSignalingResult();
}

std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thread> owner,
                                                             VAddr address) {
    // The mutex address must be 4-byte aligned
    if ((address % sizeof(u32)) != 0) {
        LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
        return {ERR_INVALID_ADDRESS, nullptr};
    }

    auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address);
    if (new_owner == nullptr) {
        system.Memory().Write32(address, 0);
        return {RESULT_SUCCESS, nullptr};
    }
    // Transfer the ownership of the mutex from the previous owner to the new one.
    TransferMutexOwnership(address, owner, new_owner);
    u32 mutex_value = new_owner->GetWaitHandle();
    if (num_waiters >= 2) {
        // Notify the guest that there are still some threads waiting for the mutex
        mutex_value |= Mutex::MutexHasWaitersFlag;
    }
    new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
    new_owner->SetLockOwner(nullptr);
    new_owner->ResumeFromWait();

    system.Memory().Write32(address, mutex_value);
    return {RESULT_SUCCESS, new_owner};
}

ResultCode Mutex::Release(VAddr address) {
    auto& kernel = system.Kernel();
    KScopedSchedulerLock lock(kernel);

    std::shared_ptr<Thread> current_thread =
        SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());

    auto [result, new_owner] = Unlock(current_thread, address);

    if (result != RESULT_SUCCESS && new_owner != nullptr) {
        new_owner->SetSynchronizationResults(nullptr, result);
    }

    return result;
}

} // namespace Kernel
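
The guest-visible mutex word manipulated above packs the owner's wait handle into the low bits and uses bit 30 (MutexHasWaitersFlag) to mark contention, which is why Unlock() ORs the flag back in when two or more waiters remain. A standalone sketch with a hypothetical handle value:

// Standalone sketch of the mutex word encoding; the handle value is made up.
#include <cstdint>
#include <iostream>

constexpr std::uint32_t MutexHasWaitersFlag = 0x40000000;
constexpr std::uint32_t MutexOwnerMask = 0xBFFFFFFF;

int main() {
    const std::uint32_t owner_handle = 0x0000ABCD; // hypothetical thread wait handle
    const std::uint32_t contended = owner_handle | MutexHasWaitersFlag;

    std::cout << std::hex;
    std::cout << (contended & MutexOwnerMask) << '\n';             // abcd: owner recovered
    std::cout << ((contended & MutexHasWaitersFlag) != 0) << '\n'; // 1: waiters present
    return 0;
}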
42
src/core/hle/kernel/mutex.h
Executable file
@@ -0,0 +1,42 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <utility>

#include "common/common_types.h"
#include "core/hle/kernel/object.h"

union ResultCode;

namespace Core {
class System;
}

namespace Kernel {

class Thread;

class Mutex final {
public:
    explicit Mutex(Core::System& system);
    ~Mutex();

    /// Flag that indicates that a mutex still has threads waiting for it.
    static constexpr u32 MutexHasWaitersFlag = 0x40000000;
    /// Mask of the bits in a mutex address value that contain the mutex owner.
    static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;

    /// Attempts to acquire a mutex at the specified address.
    ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
                          Handle requesting_thread_handle);

    /// Unlocks a mutex for owner at address
    std::pair<ResultCode, std::shared_ptr<Thread>> Unlock(std::shared_ptr<Thread> owner,
                                                          VAddr address);

    /// Releases the mutex at the specified address.
    ResultCode Release(VAddr address);

private:
    Core::System& system;
};

} // namespace Kernel
38
src/core/hle/kernel/object.cpp
Executable file
@@ -0,0 +1,38 @@
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"

namespace Kernel {

Object::Object(KernelCore& kernel) : kernel{kernel}, object_id{kernel.CreateNewObjectID()} {}
Object::~Object() = default;

bool Object::IsWaitable() const {
    switch (GetHandleType()) {
    case HandleType::ReadableEvent:
    case HandleType::Thread:
    case HandleType::Process:
    case HandleType::ServerPort:
    case HandleType::ServerSession:
        return true;

    case HandleType::Unknown:
    case HandleType::WritableEvent:
    case HandleType::SharedMemory:
    case HandleType::TransferMemory:
    case HandleType::ResourceLimit:
    case HandleType::ClientPort:
    case HandleType::ClientSession:
    case HandleType::Session:
        return false;
    }

    UNREACHABLE();
    return false;
}

} // namespace Kernel
86
src/core/hle/kernel/object.h
Executable file
86
src/core/hle/kernel/object.h
Executable file
@@ -0,0 +1,86 @@
|
||||
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <atomic>
#include <memory>
#include <string>

#include "common/common_types.h"

namespace Kernel {

class KernelCore;

using Handle = u32;

enum class HandleType : u32 {
Unknown,
WritableEvent,
ReadableEvent,
SharedMemory,
TransferMemory,
Thread,
Process,
ResourceLimit,
ClientPort,
ServerPort,
ClientSession,
ServerSession,
Session,
};

class Object : NonCopyable, public std::enable_shared_from_this<Object> {
public:
explicit Object(KernelCore& kernel);
virtual ~Object();

/// Returns a unique identifier for the object. For debugging purposes only.
u32 GetObjectId() const {
return object_id.load(std::memory_order_relaxed);
}

virtual std::string GetTypeName() const {
return "[BAD KERNEL OBJECT TYPE]";
}
virtual std::string GetName() const {
return "[UNKNOWN KERNEL OBJECT]";
}
virtual HandleType GetHandleType() const = 0;

/**
* Check if a thread can wait on the object
* @return True if a thread can wait on the object, otherwise false
*/
bool IsWaitable() const;

protected:
/// The kernel instance this object was created under.
KernelCore& kernel;

private:
std::atomic<u32> object_id{0};
};

template <typename T>
std::shared_ptr<T> SharedFrom(T* raw) {
if (raw == nullptr)
return nullptr;
return std::static_pointer_cast<T>(raw->shared_from_this());
}

/**
* Attempts to downcast the given Object pointer to a pointer to T.
* @return Derived pointer to the object, or `nullptr` if `object` isn't of type T.
*/
template <typename T>
inline std::shared_ptr<T> DynamicObjectCast(std::shared_ptr<Object> object) {
if (object != nullptr && object->GetHandleType() == T::HANDLE_TYPE) {
return std::static_pointer_cast<T>(object);
}
return nullptr;
}
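
// Usage sketch (illustrative, not part of this commit): after a handle-table
// lookup yields a generic Object, DynamicObjectCast recovers the concrete type
// via the type's static HANDLE_TYPE tag, e.g.:
//
//     std::shared_ptr<Object> obj = handle_table.GetGeneric(handle);
//     if (auto process = DynamicObjectCast<Process>(obj)) {
//         // obj really is a Process; a mismatched type yields nullptr instead.
//     }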

} // namespace Kernel
62
src/core/hle/kernel/physical_core.cpp
Executable file
@@ -0,0 +1,62 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/spin_lock.h"
#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
#include "core/core.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"

namespace Kernel {

PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system,
Kernel::KScheduler& scheduler, Core::CPUInterrupts& interrupts)
: core_index{core_index}, system{system}, scheduler{scheduler},
interrupts{interrupts}, guard{std::make_unique<Common::SpinLock>()} {}

PhysicalCore::~PhysicalCore() = default;

void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
#ifdef ARCHITECTURE_x86_64
auto& kernel = system.Kernel();
if (is_64_bit) {
arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
} else {
arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
}
#else
#error Platform not supported yet.
#endif
}

void PhysicalCore::Run() {
arm_interface->Run();
}

void PhysicalCore::Idle() {
interrupts[core_index].AwaitInterrupt();
}

bool PhysicalCore::IsInterrupted() const {
return interrupts[core_index].IsInterrupted();
}

void PhysicalCore::Interrupt() {
guard->lock();
interrupts[core_index].SetInterrupt(true);
guard->unlock();
}

void PhysicalCore::ClearInterrupt() {
guard->lock();
interrupts[core_index].SetInterrupt(false);
guard->unlock();
}

} // namespace Kernel
99
src/core/hle/kernel/physical_core.h
Executable file
@@ -0,0 +1,99 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <cstddef>
#include <memory>

#include "core/arm/arm_interface.h"

namespace Common {
class SpinLock;
}

namespace Kernel {
class KScheduler;
} // namespace Kernel

namespace Core {
class CPUInterruptHandler;
class ExclusiveMonitor;
class System;
} // namespace Core

namespace Kernel {

class PhysicalCore {
public:
PhysicalCore(std::size_t core_index, Core::System& system, Kernel::KScheduler& scheduler,
Core::CPUInterrupts& interrupts);
~PhysicalCore();

PhysicalCore(const PhysicalCore&) = delete;
PhysicalCore& operator=(const PhysicalCore&) = delete;

PhysicalCore(PhysicalCore&&) = default;
PhysicalCore& operator=(PhysicalCore&&) = delete;

/// Initialize the core for the specified parameters.
void Initialize(bool is_64_bit);

/// Execute the current JIT state.
void Run();

void Idle();

/// Interrupt this physical core.
void Interrupt();

/// Clear this core's interrupt.
void ClearInterrupt();

/// Check if this core is interrupted.
bool IsInterrupted() const;

bool IsInitialized() const {
return arm_interface != nullptr;
}

Core::ARM_Interface& ArmInterface() {
return *arm_interface;
}

const Core::ARM_Interface& ArmInterface() const {
return *arm_interface;
}

bool IsMainCore() const {
return core_index == 0;
}

bool IsSystemCore() const {
return core_index == 3;
}
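
// Note (assumption, not stated in this commit): core 3 is treated as the
// "system" core here because Horizon conventionally reserves it for operating
// system services, leaving cores 0-2 to applications; this helper merely
// encodes that convention.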

std::size_t CoreIndex() const {
return core_index;
}

Kernel::KScheduler& Scheduler() {
return scheduler;
}

const Kernel::KScheduler& Scheduler() const {
return scheduler;
}

private:
const std::size_t core_index;
Core::System& system;
Kernel::KScheduler& scheduler;
Core::CPUInterrupts& interrupts;
std::unique_ptr<Common::SpinLock> guard;
std::unique_ptr<Core::ARM_Interface> arm_interface;
};

} // namespace Kernel
24
src/core/hle/kernel/physical_memory.h
Executable file
@@ -0,0 +1,24 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <vector>

#include "common/alignment.h"
#include "common/common_types.h"

namespace Kernel {

// This encapsulation serves 2 purposes:
// - First, to encapsulate host physical memory under a single type and set a
// standard for managing it.
// - Second, to ensure all host backing memory used is aligned to 256 bytes due
// to strict alignment restrictions on GPU memory.

using PhysicalMemoryVector = std::vector<u8, Common::AlignmentAllocator<u8, 256>>;
class PhysicalMemory final : public PhysicalMemoryVector {
using PhysicalMemoryVector::PhysicalMemoryVector;
};
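
// Usage sketch (illustrative): because PhysicalMemory inherits std::vector's
// constructors, callers can size backing stores directly while the
// AlignmentAllocator guarantees the 256-byte alignment, e.g.:
//
//     Kernel::PhysicalMemory block(0x1000); // zero-filled, 256-byte aligned
//     assert(reinterpret_cast<uintptr_t>(block.data()) % 256 == 0);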

} // namespace Kernel
454
src/core/hle/kernel/process.cpp
Executable file
@@ -0,0 +1,454 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <bitset>
#include <ctime>
#include <memory>
#include <random>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_block_manager.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/memory/slab_heap.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/lock.h"
#include "core/memory.h"
#include "core/settings.h"

namespace Kernel {
namespace {
/**
* Sets up the primary application thread
*
* @param system The system instance to create the main thread under.
* @param owner_process The parent process for the main thread
* @param priority The priority to give the main thread
* @param stack_top The top of the stack for the main thread
*/
void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) {
const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
ThreadType type = THREADTYPE_USER;
auto thread_res = Thread::Create(system, type, "main", entry_point, priority, 0,
owner_process.GetIdealCore(), stack_top, &owner_process);

std::shared_ptr<Thread> thread = std::move(thread_res).Unwrap();

// Register 1 must be a handle to the main thread
const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap();
thread->GetContext32().cpu_registers[0] = 0;
thread->GetContext64().cpu_registers[0] = 0;
thread->GetContext32().cpu_registers[1] = thread_handle;
thread->GetContext64().cpu_registers[1] = thread_handle;

auto& kernel = system.Kernel();
// Threads are dormant by default; wake up the main thread so it runs when the scheduler fires
{
KScopedSchedulerLock lock{kernel};
thread->SetStatus(ThreadStatus::Ready);
}
}
} // Anonymous namespace

// Represents a page used for thread-local storage.
//
// Each TLS page contains slots that may be used by processes and threads.
// Every process and thread is created with a slot in some arbitrary page
// (whichever page happens to have an available slot).
class TLSPage {
public:
static constexpr std::size_t num_slot_entries =
Core::Memory::PAGE_SIZE / Core::Memory::TLS_ENTRY_SIZE;

explicit TLSPage(VAddr address) : base_address{address} {}

bool HasAvailableSlots() const {
return !is_slot_used.all();
}

VAddr GetBaseAddress() const {
return base_address;
}

std::optional<VAddr> ReserveSlot() {
for (std::size_t i = 0; i < is_slot_used.size(); i++) {
if (is_slot_used[i]) {
continue;
}

is_slot_used[i] = true;
return base_address + (i * Core::Memory::TLS_ENTRY_SIZE);
}

return std::nullopt;
}

void ReleaseSlot(VAddr address) {
// Ensure that all given addresses are consistent with how TLS pages
// are intended to be used when releasing slots.
ASSERT(IsWithinPage(address));
ASSERT((address % Core::Memory::TLS_ENTRY_SIZE) == 0);

const std::size_t index = (address - base_address) / Core::Memory::TLS_ENTRY_SIZE;
is_slot_used[index] = false;
}

private:
bool IsWithinPage(VAddr address) const {
return base_address <= address && address < base_address + Core::Memory::PAGE_SIZE;
}

VAddr base_address;
std::bitset<num_slot_entries> is_slot_used;
};
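
// Worked example (illustrative): with PAGE_SIZE = 0x1000 and
// TLS_ENTRY_SIZE = 0x200, each TLSPage holds 8 slots. For a page based at
// 0x10000000, ReserveSlot() handing out slot 3 returns
// 0x10000000 + 3 * 0x200 = 0x10000600, and ReleaseSlot(0x10000600)
// recomputes index (0x10000600 - 0x10000000) / 0x200 = 3.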

std::shared_ptr<Process> Process::Create(Core::System& system, std::string name, ProcessType type) {
auto& kernel = system.Kernel();

std::shared_ptr<Process> process = std::make_shared<Process>(system);
process->name = std::move(name);
process->resource_limit = ResourceLimit::Create(kernel);
process->status = ProcessStatus::Created;
process->program_id = 0;
process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
: kernel.CreateNewUserProcessID();
process->capabilities.InitializeForMetadatalessProcess();

std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr)));
std::uniform_int_distribution<u64> distribution;
std::generate(process->random_entropy.begin(), process->random_entropy.end(),
[&] { return distribution(rng); });

kernel.AppendNewProcess(process);
return process;
}

std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {
return resource_limit;
}

u64 Process::GetTotalPhysicalMemoryAvailable() const {
const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) +
page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
main_thread_stack_size};

if (capacity < memory_usage_capacity) {
return capacity;
}

return memory_usage_capacity;
}

u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
}

u64 Process::GetTotalPhysicalMemoryUsed() const {
return image_size + main_thread_stack_size + page_table->GetTotalHeapSize() +
GetSystemResourceSize();
}

u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
}

void Process::InsertConditionVariableThread(std::shared_ptr<Thread> thread) {
VAddr cond_var_addr = thread->GetCondVarWaitAddress();
std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
auto it = thread_list.begin();
while (it != thread_list.end()) {
const std::shared_ptr<Thread> current_thread = *it;
if (current_thread->GetPriority() > thread->GetPriority()) {
thread_list.insert(it, thread);
return;
}
++it;
}
thread_list.push_back(thread);
}

void Process::RemoveConditionVariableThread(std::shared_ptr<Thread> thread) {
VAddr cond_var_addr = thread->GetCondVarWaitAddress();
std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
auto it = thread_list.begin();
while (it != thread_list.end()) {
const std::shared_ptr<Thread> current_thread = *it;
if (current_thread.get() == thread.get()) {
thread_list.erase(it);
return;
}
++it;
}
}

std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads(
const VAddr cond_var_addr) {
std::vector<std::shared_ptr<Thread>> result{};
std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
auto it = thread_list.begin();
while (it != thread_list.end()) {
std::shared_ptr<Thread> current_thread = *it;
result.push_back(current_thread);
++it;
}
return result;
}

void Process::RegisterThread(const Thread* thread) {
thread_list.push_back(thread);
}

void Process::UnregisterThread(const Thread* thread) {
thread_list.remove(thread);
}

ResultCode Process::ClearSignalState() {
KScopedSchedulerLock lock(system.Kernel());
if (status == ProcessStatus::Exited) {
LOG_ERROR(Kernel, "called on a terminated process instance.");
return ERR_INVALID_STATE;
}

if (!is_signaled) {
LOG_ERROR(Kernel, "called on a process instance that isn't signaled.");
return ERR_INVALID_STATE;
}

is_signaled = false;
return RESULT_SUCCESS;
}

ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
std::size_t code_size) {
program_id = metadata.GetTitleID();
ideal_core = metadata.GetMainThreadCore();
is_64bit_process = metadata.Is64BitProgram();
system_resource_size = metadata.GetSystemResourceSize();
image_size = code_size;

// Initialize process address space
if (const ResultCode result{
page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
code_size, Memory::MemoryManager::Pool::Application)};
result.IsError()) {
return result;
}

// Map process code region
if (const ResultCode result{page_table->MapProcessCode(
page_table->GetCodeRegionStart(), code_size / Memory::PageSize,
Memory::MemoryState::Code, Memory::MemoryPermission::None)};
result.IsError()) {
return result;
}

// Initialize process capabilities
const auto& caps{metadata.GetKernelCapabilities()};
if (const ResultCode result{
capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
result.IsError()) {
return result;
}

// Set memory usage capacity
switch (metadata.GetAddressSpaceType()) {
case FileSys::ProgramAddressSpaceType::Is32Bit:
case FileSys::ProgramAddressSpaceType::Is36Bit:
case FileSys::ProgramAddressSpaceType::Is39Bit:
memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart();
break;

case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() +
page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart();
break;

default:
UNREACHABLE();
}

// Set initial resource limits
resource_limit->SetLimitValue(
ResourceType::PhysicalMemory,
kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application));
resource_limit->SetLimitValue(ResourceType::Threads, 608);
resource_limit->SetLimitValue(ResourceType::Events, 700);
resource_limit->SetLimitValue(ResourceType::TransferMemory, 128);
resource_limit->SetLimitValue(ResourceType::Sessions, 894);
ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, code_size));

// Create TLS region
tls_region_address = CreateTLSRegion();

return handle_table.SetSize(capabilities.GetHandleTableSize());
}

void Process::Run(s32 main_thread_priority, u64 stack_size) {
AllocateMainThreadStack(stack_size);

const std::size_t heap_capacity{memory_usage_capacity - main_thread_stack_size - image_size};
ASSERT(!page_table->SetHeapCapacity(heap_capacity).IsError());

ChangeStatus(ProcessStatus::Running);

SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top);
resource_limit->Reserve(ResourceType::Threads, 1);
resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size);
}

void Process::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exiting);

const auto stop_threads = [this](const std::vector<std::shared_ptr<Thread>>& thread_list) {
for (auto& thread : thread_list) {
if (thread->GetOwnerProcess() != this)
continue;

if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread())
continue;

// TODO(Subv): When are the other running/ready threads terminated?
ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynch,
"Exiting processes with non-waiting threads is currently unimplemented");

thread->Stop();
}
};

stop_threads(system.GlobalSchedulerContext().GetThreadList());

FreeTLSRegion(tls_region_address);
tls_region_address = 0;

ChangeStatus(ProcessStatus::Exited);
}

/**
* Attempts to find a TLS page that contains a free slot for
* use by a thread.
*
* @returns If a page with an available slot is found, then an iterator
* pointing to the page is returned. Otherwise the end iterator
* is returned instead.
*/
static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
return std::find_if(tls_pages.begin(), tls_pages.end(),
[](const auto& page) { return page.HasAvailableSlots(); });
}

VAddr Process::CreateTLSRegion() {
KScopedSchedulerLock lock(system.Kernel());
if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
tls_page_iter != tls_pages.cend()) {
return *tls_page_iter->ReserveSlot();
}

Memory::Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()};
ASSERT(tls_page_ptr);

const VAddr start{page_table->GetKernelMapRegionStart()};
const VAddr size{page_table->GetKernelMapRegionEnd() - start};
const PAddr tls_map_addr{system.DeviceMemory().GetPhysicalAddr(tls_page_ptr)};
const VAddr tls_page_addr{
page_table
->AllocateAndMapMemory(1, Memory::PageSize, true, start, size / Memory::PageSize,
Memory::MemoryState::ThreadLocal,
Memory::MemoryPermission::ReadAndWrite, tls_map_addr)
.ValueOr(0)};

ASSERT(tls_page_addr);

std::memset(tls_page_ptr, 0, Memory::PageSize);
tls_pages.emplace_back(tls_page_addr);

const auto reserve_result{tls_pages.back().ReserveSlot()};
ASSERT(reserve_result.has_value());

return *reserve_result;
}

void Process::FreeTLSRegion(VAddr tls_address) {
KScopedSchedulerLock lock(system.Kernel());
const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
auto iter =
std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
return page.GetBaseAddress() == aligned_address;
});

// Something has gone very wrong if we're freeing a region
// with no actual page available.
ASSERT(iter != tls_pages.cend());

iter->ReleaseSlot(tls_address);
}

void Process::LoadModule(CodeSet code_set, VAddr base_addr) {
std::lock_guard lock{HLE::g_hle_lock};
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
Memory::MemoryPermission permission) {
page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission);
};

system.Memory().WriteBlock(*this, base_addr, code_set.memory.data(), code_set.memory.size());

ReprotectSegment(code_set.CodeSegment(), Memory::MemoryPermission::ReadAndExecute);
ReprotectSegment(code_set.RODataSegment(), Memory::MemoryPermission::Read);
ReprotectSegment(code_set.DataSegment(), Memory::MemoryPermission::ReadAndWrite);
}

Process::Process(Core::System& system)
: SynchronizationObject{system.Kernel()}, page_table{std::make_unique<Memory::PageTable>(
system)},
handle_table{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {}

Process::~Process() = default;

void Process::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "Object unavailable!");
}

bool Process::ShouldWait(const Thread* thread) const {
return !is_signaled;
}

void Process::ChangeStatus(ProcessStatus new_status) {
if (status == new_status) {
return;
}

status = new_status;
is_signaled = true;
Signal();
}

ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) {
ASSERT(stack_size);

// The kernel always ensures that the given stack size is page aligned.
main_thread_stack_size = Common::AlignUp(stack_size, Memory::PageSize);

const VAddr start{page_table->GetStackRegionStart()};
const std::size_t size{page_table->GetStackRegionEnd() - start};

CASCADE_RESULT(main_thread_stack_top,
page_table->AllocateAndMapMemory(
main_thread_stack_size / Memory::PageSize, Memory::PageSize, false, start,
size / Memory::PageSize, Memory::MemoryState::Stack,
Memory::MemoryPermission::ReadAndWrite));

main_thread_stack_top += main_thread_stack_size;

return RESULT_SUCCESS;
}

} // namespace Kernel
417
src/core/hle/kernel/process.h
Executable file
@@ -0,0 +1,417 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <cstddef>
#include <list>
#include <string>
#include <unordered_map>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"

namespace Core {
class System;
}

namespace FileSys {
class ProgramMetadata;
}

namespace Kernel {

class KernelCore;
class ResourceLimit;
class Thread;
class TLSPage;

struct CodeSet;

namespace Memory {
class PageTable;
}

enum class MemoryRegion : u16 {
APPLICATION = 1,
SYSTEM = 2,
BASE = 3,
};

/**
* Indicates the status of a Process instance.
*
* @note These match the values as used by the kernel,
* so new entries should only be added if RE
* shows that a new value has been introduced.
*/
enum class ProcessStatus {
Created,
CreatedWithDebuggerAttached,
Running,
WaitingForDebuggerToAttach,
DebuggerAttached,
Exiting,
Exited,
DebugBreak,
};

class Process final : public SynchronizationObject {
public:
explicit Process(Core::System& system);
~Process() override;

enum : u64 {
/// Lowest allowed process ID for a kernel initial process.
InitialKIPIDMin = 1,
/// Highest allowed process ID for a kernel initial process.
InitialKIPIDMax = 80,

/// Lowest allowed process ID for a userland process.
ProcessIDMin = 81,
/// Highest allowed process ID for a userland process.
ProcessIDMax = 0xFFFFFFFFFFFFFFFF,
};
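
// Illustrative note: with these ranges, the first kernel-internal process
// created receives ID 1 and the first userland title receives ID 81; see
// KernelCore::CreateNewKernelProcessID()/CreateNewUserProcessID() in
// Process::Create() for where the split is applied.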

// Used to determine how process IDs are assigned.
enum class ProcessType {
KernelInternal,
Userland,
};

static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;

static std::shared_ptr<Process> Create(Core::System& system, std::string name,
ProcessType type);

std::string GetTypeName() const override {
return "Process";
}
std::string GetName() const override {
return name;
}

static constexpr HandleType HANDLE_TYPE = HandleType::Process;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}

/// Gets a reference to the process' page table.
Memory::PageTable& PageTable() {
return *page_table;
}

/// Gets a const reference to the process' page table.
const Memory::PageTable& PageTable() const {
return *page_table;
}

/// Gets a reference to the process' handle table.
HandleTable& GetHandleTable() {
return handle_table;
}

/// Gets a const reference to the process' handle table.
const HandleTable& GetHandleTable() const {
return handle_table;
}

/// Gets a reference to the process' address arbiter.
AddressArbiter& GetAddressArbiter() {
return address_arbiter;
}

/// Gets a const reference to the process' address arbiter.
const AddressArbiter& GetAddressArbiter() const {
return address_arbiter;
}

/// Gets a reference to the process' mutex lock.
Mutex& GetMutex() {
return mutex;
}

/// Gets a const reference to the process' mutex lock.
const Mutex& GetMutex() const {
return mutex;
}

/// Gets the address to the process' dedicated TLS region.
VAddr GetTLSRegionAddress() const {
return tls_region_address;
}

/// Gets the current status of the process.
ProcessStatus GetStatus() const {
return status;
}

/// Gets the unique ID that identifies this particular process.
u64 GetProcessID() const {
return process_id;
}

/// Gets the title ID corresponding to this process.
u64 GetTitleID() const {
return program_id;
}

/// Gets the resource limit descriptor for this process.
std::shared_ptr<ResourceLimit> GetResourceLimit() const;

/// Gets the ideal CPU core ID for this process.
u8 GetIdealCore() const {
return ideal_core;
}

/// Gets the bitmask of allowed cores that this process' threads can run on.
u64 GetCoreMask() const {
return capabilities.GetCoreMask();
}

/// Gets the bitmask of allowed thread priorities.
u64 GetPriorityMask() const {
return capabilities.GetPriorityMask();
}

/// Gets the amount of secure memory to allocate for memory management.
u32 GetSystemResourceSize() const {
return system_resource_size;
}

/// Gets the amount of secure memory currently in use for memory management.
u32 GetSystemResourceUsage() const {
// On hardware, this returns the amount of system resource memory that has
// been used by the kernel. This is problematic for yuzu to emulate, because
// system resource memory is used for page tables -- and yuzu doesn't really
// have a way to calculate how much memory is required for page tables for
// the current process at any given time.
// TODO: Is this even worth implementing? Games may retrieve this value via
// an SDK function that gets used + available system resource size for debug
// or diagnostic purposes. However, it seems unlikely that a game would make
// decisions based on how much system memory is dedicated to its page tables.
// Is returning a value other than zero wise?
return 0;
}

/// Whether this process is an AArch64 or AArch32 process.
bool Is64BitProcess() const {
return is_64bit_process;
}

/// Gets the total running time of the process instance in ticks.
u64 GetCPUTimeTicks() const {
return total_process_running_time_ticks;
}

/// Updates the total running time, adding the given ticks to it.
void UpdateCPUTimeTicks(u64 ticks) {
total_process_running_time_ticks += ticks;
}

/// Gets the process schedule count, used for thread yielding.
s64 GetScheduledCount() const {
return schedule_count;
}

/// Increments the process schedule count, used for thread yielding.
void IncrementScheduledCount() {
++schedule_count;
}

/// Gets 8 bytes of random data for svcGetInfo RandomEntropy.
u64 GetRandomEntropy(std::size_t index) const {
return random_entropy.at(index);
}

/// Retrieves the total physical memory available to this process in bytes.
u64 GetTotalPhysicalMemoryAvailable() const;

/// Retrieves the total physical memory available to this process in bytes,
/// without the size of the personal system resource heap added to it.
u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const;

/// Retrieves the total physical memory used by this process in bytes.
u64 GetTotalPhysicalMemoryUsed() const;

/// Retrieves the total physical memory used by this process in bytes,
/// without the size of the personal system resource heap added to it.
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;

/// Gets the list of all threads created with this process as their owner.
const std::list<const Thread*>& GetThreadList() const {
return thread_list;
}

/// Insert a thread into the condition variable wait container.
void InsertConditionVariableThread(std::shared_ptr<Thread> thread);

/// Remove a thread from the condition variable wait container.
void RemoveConditionVariableThread(std::shared_ptr<Thread> thread);

/// Obtain all condition variable threads waiting for some address.
std::vector<std::shared_ptr<Thread>> GetConditionVariableThreads(VAddr cond_var_addr);

/// Registers a thread as being created under this process,
/// adding it to this process' thread list.
void RegisterThread(const Thread* thread);

/// Unregisters a thread from this process, removing it
/// from this process' thread list.
void UnregisterThread(const Thread* thread);

/// Clears the signaled state of the process if and only if it's signaled.
///
/// @pre The process must not be already terminated. If this is called on a
/// terminated process, then ERR_INVALID_STATE will be returned.
///
/// @pre The process must be in a signaled state. If this is called on a
/// process instance that is not signaled, ERR_INVALID_STATE will be
/// returned.
ResultCode ClearSignalState();

/**
* Loads process-specific configuration info with metadata provided
* by an executable.
*
* @param metadata The provided metadata to load process specific info from.
*
* @returns RESULT_SUCCESS if all relevant metadata was able to be
* loaded and parsed. Otherwise, an error code is returned.
*/
ResultCode LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size);

/**
* Starts the main application thread for this process.
*
* @param main_thread_priority The priority for the main thread.
* @param stack_size The stack size for the main thread in bytes.
*/
void Run(s32 main_thread_priority, u64 stack_size);

/**
* Prepares a process for termination by stopping all of its threads
* and clearing any other resources.
*/
void PrepareForTermination();

void LoadModule(CodeSet code_set, VAddr base_addr);

///////////////////////////////////////////////////////////////////////////////////////////////
// Thread-local storage management

// Marks the next available region as used and returns the address of the slot.
[[nodiscard]] VAddr CreateTLSRegion();

// Frees a used TLS slot identified by the given address.
void FreeTLSRegion(VAddr tls_address);

private:
/// Checks if the specified thread should wait until this process is available.
bool ShouldWait(const Thread* thread) const override;

/// Acquires/locks this process for the specified thread if it's available.
void Acquire(Thread* thread) override;

/// Changes the process status. If the status is different
/// from the current process status, then this will trigger
/// a process signal.
void ChangeStatus(ProcessStatus new_status);

/// Allocates the main thread stack for the process, given the stack size in bytes.
ResultCode AllocateMainThreadStack(std::size_t stack_size);

/// Memory manager for this process
std::unique_ptr<Memory::PageTable> page_table;

/// Current status of the process
ProcessStatus status{};

/// The ID of this process
u64 process_id = 0;

/// Title ID corresponding to the process
u64 program_id = 0;

/// Specifies additional memory to be reserved for the process's memory management by the
/// system. When this is non-zero, secure memory is allocated and used for page table allocation
/// instead of using the normal global page tables/memory block management.
u32 system_resource_size = 0;

/// Resource limit descriptor for this process
std::shared_ptr<ResourceLimit> resource_limit;

/// The ideal CPU core for this process, threads are scheduled on this core by default.
u8 ideal_core = 0;

/// The Thread Local Storage area is allocated as processes create threads,
/// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part
/// holds the TLS for a specific thread. This vector contains which parts are in use for each
/// page as a bitmask.
/// This vector will grow as more pages are allocated for new threads.
std::vector<TLSPage> tls_pages;

/// Contains the parsed process capability descriptors.
ProcessCapabilities capabilities;

/// Whether or not this process is AArch64, or AArch32.
/// By default, we currently assume this is true, unless otherwise
/// specified by metadata provided to the process during loading.
bool is_64bit_process = true;

/// Total running time for the process in ticks.
u64 total_process_running_time_ticks = 0;

/// Per-process handle table for storing created object handles in.
HandleTable handle_table;

/// Per-process address arbiter.
AddressArbiter address_arbiter;

/// The per-process mutex lock instance used for handling various
/// forms of services, such as lock arbitration, and condition
/// variable related facilities.
Mutex mutex;

/// Address indicating the location of the process' dedicated TLS region.
VAddr tls_region_address = 0;

/// Random values for svcGetInfo RandomEntropy
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};

/// List of threads that are running with this process as their owner.
std::list<const Thread*> thread_list;

/// List of threads waiting for a condition variable
std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> cond_var_threads;

/// Address of the top of the main thread's stack
VAddr main_thread_stack_top{};

/// Size of the main thread's stack
std::size_t main_thread_stack_size{};

/// Memory usage capacity for the process
std::size_t memory_usage_capacity{};

/// Process total image size
std::size_t image_size{};

/// Name of this process
std::string name;

/// Schedule count of this process
s64 schedule_count{};

/// System context
Core::System& system;
};

} // namespace Kernel
385
src/core/hle/kernel/process_capability.cpp
Executable file
@@ -0,0 +1,385 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/bit_util.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process_capability.h"

namespace Kernel {
namespace {

// clang-format off

// Shift offsets for kernel capability types.
enum : u32 {
CapabilityOffset_PriorityAndCoreNum = 3,
CapabilityOffset_Syscall = 4,
CapabilityOffset_MapPhysical = 6,
CapabilityOffset_MapIO = 7,
CapabilityOffset_Interrupt = 11,
CapabilityOffset_ProgramType = 13,
CapabilityOffset_KernelVersion = 14,
CapabilityOffset_HandleTableSize = 15,
CapabilityOffset_Debug = 16,
};

// Combined mask of all parameters that may be initialized only once.
constexpr u32 InitializeOnceMask = (1U << CapabilityOffset_PriorityAndCoreNum) |
(1U << CapabilityOffset_ProgramType) |
(1U << CapabilityOffset_KernelVersion) |
(1U << CapabilityOffset_HandleTableSize) |
(1U << CapabilityOffset_Debug);

// Packed kernel version indicating 10.4.0
constexpr u32 PackedKernelVersion = 0x520000;

// Indicates possible types of capabilities that can be specified.
enum class CapabilityType : u32 {
Unset = 0U,
PriorityAndCoreNum = (1U << CapabilityOffset_PriorityAndCoreNum) - 1,
Syscall = (1U << CapabilityOffset_Syscall) - 1,
MapPhysical = (1U << CapabilityOffset_MapPhysical) - 1,
MapIO = (1U << CapabilityOffset_MapIO) - 1,
Interrupt = (1U << CapabilityOffset_Interrupt) - 1,
ProgramType = (1U << CapabilityOffset_ProgramType) - 1,
KernelVersion = (1U << CapabilityOffset_KernelVersion) - 1,
HandleTableSize = (1U << CapabilityOffset_HandleTableSize) - 1,
Debug = (1U << CapabilityOffset_Debug) - 1,
Ignorable = 0xFFFFFFFFU,
};

// clang-format on

constexpr CapabilityType GetCapabilityType(u32 value) {
return static_cast<CapabilityType>((~value & (value + 1)) - 1);
}
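
// Worked example (illustrative): `(~value & (value + 1)) - 1` isolates the run
// of low set bits that tags each descriptor. For a Syscall descriptor the low
// bits are 0b01111, so `value + 1` carries into bit 4, `~value` keeps only
// that bit, and subtracting 1 yields 0b01111 == CapabilityType::Syscall. A
// descriptor with all 32 bits set instead collapses to Ignorable.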

u32 GetFlagBitOffset(CapabilityType type) {
const auto value = static_cast<u32>(type);
return static_cast<u32>(Common::BitSize<u32>() - Common::CountLeadingZeroes32(value));
}

} // Anonymous namespace

ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
std::size_t num_capabilities,
Memory::PageTable& page_table) {
Clear();

// Allow all cores and priorities.
core_mask = 0xF;
priority_mask = 0xFFFFFFFFFFFFFFFF;
kernel_version = PackedKernelVersion;

return ParseCapabilities(capabilities, num_capabilities, page_table);
}

ResultCode ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
std::size_t num_capabilities,
Memory::PageTable& page_table) {
Clear();

return ParseCapabilities(capabilities, num_capabilities, page_table);
}

void ProcessCapabilities::InitializeForMetadatalessProcess() {
// Allow all cores and priorities
core_mask = 0xF;
priority_mask = 0xFFFFFFFFFFFFFFFF;
kernel_version = PackedKernelVersion;

// Allow all system calls and interrupts.
svc_capabilities.set();
interrupt_capabilities.set();

// Allow using the maximum possible amount of handles
handle_table_size = static_cast<s32>(HandleTable::MAX_COUNT);

// Allow all debugging capabilities.
is_debuggable = true;
can_force_debug = true;
}

ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
std::size_t num_capabilities,
Memory::PageTable& page_table) {
u32 set_flags = 0;
u32 set_svc_bits = 0;

for (std::size_t i = 0; i < num_capabilities; ++i) {
const u32 descriptor = capabilities[i];
const auto type = GetCapabilityType(descriptor);

if (type == CapabilityType::MapPhysical) {
i++;

// The MapPhysical type uses two descriptor flags for its parameters.
// If there's only one, then there's a problem.
if (i >= num_capabilities) {
LOG_ERROR(Kernel, "Invalid combination! i={}", i);
return ERR_INVALID_COMBINATION;
}

const auto size_flags = capabilities[i];
if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) {
LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags);
return ERR_INVALID_COMBINATION;
}

const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
if (result.IsError()) {
LOG_ERROR(Kernel, "Failed to map physical flags! descriptor={}, size_flags={}",
descriptor, size_flags);
return result;
}
} else {
const auto result =
ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table);
if (result.IsError()) {
LOG_ERROR(
Kernel,
"Failed to parse capability flag! set_flags={}, set_svc_bits={}, descriptor={}",
set_flags, set_svc_bits, descriptor);
return result;
}
}
}

return RESULT_SUCCESS;
}

ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits,
u32 flag, Memory::PageTable& page_table) {
const auto type = GetCapabilityType(flag);

if (type == CapabilityType::Unset) {
return ERR_INVALID_CAPABILITY_DESCRIPTOR;
}

// Bail early on ignorable entries; as one would expect,
// ignorable descriptors can be ignored.
if (type == CapabilityType::Ignorable) {
return RESULT_SUCCESS;
}

// Ensure that the given flag hasn't already been initialized before.
// If it has been, then bail.
const u32 flag_length = GetFlagBitOffset(type);
const u32 set_flag = 1U << flag_length;
if ((set_flag & set_flags & InitializeOnceMask) != 0) {
LOG_ERROR(Kernel,
"Attempted to initialize flags that may only be initialized once. set_flags={}",
set_flags);
return ERR_INVALID_COMBINATION;
}
set_flags |= set_flag;

switch (type) {
case CapabilityType::PriorityAndCoreNum:
return HandlePriorityCoreNumFlags(flag);
case CapabilityType::Syscall:
return HandleSyscallFlags(set_svc_bits, flag);
case CapabilityType::MapIO:
return HandleMapIOFlags(flag, page_table);
case CapabilityType::Interrupt:
return HandleInterruptFlags(flag);
case CapabilityType::ProgramType:
return HandleProgramTypeFlags(flag);
case CapabilityType::KernelVersion:
return HandleKernelVersionFlags(flag);
case CapabilityType::HandleTableSize:
return HandleHandleTableFlags(flag);
case CapabilityType::Debug:
return HandleDebugFlags(flag);
default:
break;
}

LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
return ERR_INVALID_CAPABILITY_DESCRIPTOR;
}

void ProcessCapabilities::Clear() {
svc_capabilities.reset();
interrupt_capabilities.reset();

core_mask = 0;
priority_mask = 0;

handle_table_size = 0;
kernel_version = 0;

program_type = ProgramType::SysModule;

is_debuggable = false;
can_force_debug = false;
}

ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
if (priority_mask != 0 || core_mask != 0) {
LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
priority_mask, core_mask);
return ERR_INVALID_CAPABILITY_DESCRIPTOR;
}

const u32 core_num_min = (flags >> 16) & 0xFF;
const u32 core_num_max = (flags >> 24) & 0xFF;
if (core_num_min > core_num_max) {
LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}",
core_num_min, core_num_max);
return ERR_INVALID_COMBINATION;
}

const u32 priority_min = (flags >> 10) & 0x3F;
const u32 priority_max = (flags >> 4) & 0x3F;
if (priority_min > priority_max) {
LOG_ERROR(Kernel,
"Priority min is greater than priority max! priority_min={}, priority_max={}",
priority_min, priority_max);
return ERR_INVALID_COMBINATION;
}

// The Switch only has 4 usable cores.
if (core_num_max >= 4) {
LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max);
return ERR_INVALID_PROCESSOR_ID;
}

const auto make_mask = [](u64 min, u64 max) {
const u64 range = max - min + 1;
const u64 mask = (1ULL << range) - 1;

return mask << min;
};
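
// Worked example (illustrative): for cores 0..3, make_mask(0, 3) computes
// range = 4, so mask = (1 << 4) - 1 = 0b1111 shifted left by 0, i.e. all four
// cores allowed. For priorities 24..59 the same helper yields a 36-bit run of
// set bits starting at bit 24.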

core_mask = make_mask(core_num_min, core_num_max);
priority_mask = make_mask(priority_min, priority_max);
return RESULT_SUCCESS;
}

ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
const u32 index = flags >> 29;
const u32 svc_bit = 1U << index;

// If we've already set this svc before, bail.
if ((set_svc_bits & svc_bit) != 0) {
return ERR_INVALID_COMBINATION;
}
set_svc_bits |= svc_bit;
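
// Illustrative layout: each syscall descriptor covers a window of 24 SVC IDs.
// The top 3 bits select the window (index), and the 24-bit mask below marks
// which SVCs inside that window are allowed; e.g. index 1 with mask bit 0 set
// corresponds to SVC number 1 * 24 + 0 = 24.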

const u32 svc_mask = (flags >> 5) & 0xFFFFFF;
for (u32 i = 0; i < 24; ++i) {
const u32 svc_number = index * 24 + i;

if ((svc_mask & (1U << i)) == 0) {
continue;
}

if (svc_number >= svc_capabilities.size()) {
LOG_ERROR(Kernel, "Process svc capability is out of range! svc_number={}", svc_number);
return ERR_OUT_OF_RANGE;
}

svc_capabilities[svc_number] = true;
}

return RESULT_SUCCESS;
}

ResultCode ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
Memory::PageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return RESULT_SUCCESS;
}

ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, Memory::PageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return RESULT_SUCCESS;
}

ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
constexpr u32 interrupt_ignore_value = 0x3FF;
const u32 interrupt0 = (flags >> 12) & 0x3FF;
const u32 interrupt1 = (flags >> 22) & 0x3FF;

for (u32 interrupt : {interrupt0, interrupt1}) {
if (interrupt == interrupt_ignore_value) {
continue;
}

// NOTE:
// This should be checking a generic interrupt controller value
// as part of the calculation, however, given we don't currently
// emulate that, it's sufficient to mark every interrupt as defined.

if (interrupt >= interrupt_capabilities.size()) {
LOG_ERROR(Kernel, "Process interrupt capability is out of range! interrupt={}",
interrupt);
return ERR_OUT_OF_RANGE;
}

interrupt_capabilities[interrupt] = true;
}

return RESULT_SUCCESS;
}

ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
const u32 reserved = flags >> 17;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
return ERR_RESERVED_VALUE;
}

program_type = static_cast<ProgramType>((flags >> 14) & 0b111);
return RESULT_SUCCESS;
}

ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
// Yes, the internal member variable is checked in the actual kernel here.
// This might look odd for options that are only allowed to be initialized
// just once, however the kernel has a separate initialization function for
// kernel processes and userland processes. The kernel variant sets this
// member variable ahead of time.

const u32 major_version = kernel_version >> 19;

if (major_version != 0 || flags < 0x80000) {
LOG_ERROR(Kernel,
"Kernel version is non zero or flags are too small! major_version={}, flags={}",
major_version, flags);
return ERR_INVALID_CAPABILITY_DESCRIPTOR;
}

kernel_version = flags;
return RESULT_SUCCESS;
}

ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
const u32 reserved = flags >> 26;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
return ERR_RESERVED_VALUE;
}

handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF);
return RESULT_SUCCESS;
}

ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) {
const u32 reserved = flags >> 19;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
return ERR_RESERVED_VALUE;
}

is_debuggable = (flags & 0x20000) != 0;
can_force_debug = (flags & 0x40000) != 0;
return RESULT_SUCCESS;
}

} // namespace Kernel
266
src/core/hle/kernel/process_capability.h
Executable file
@@ -0,0 +1,266 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <bitset>

#include "common/common_types.h"

union ResultCode;

namespace Kernel {

namespace Memory {
class PageTable;
}

/// The possible types of programs that may be indicated
/// by the program type capability descriptor.
enum class ProgramType {
SysModule,
Application,
Applet,
};

/// Handles kernel capability descriptors that are provided by
/// application metadata. These descriptors provide information
/// that alters certain parameters for the kernel process instance
/// that will run said application (or applet).
///
/// Capabilities are a sequence of flag descriptors that indicate various
/// configurations and constraints for a particular process.
///
/// Flag types are indicated by a sequence of set low bits. E.g. the
/// types are indicated with the low bits as follows (where x indicates "don't care"):
///
/// - Priority and core mask   : 0bxxxxxxxxxxxx0111
/// - Allowed service call mask: 0bxxxxxxxxxxx01111
/// - Map physical memory      : 0bxxxxxxxxx0111111
/// - Map IO memory            : 0bxxxxxxxx01111111
/// - Interrupts               : 0bxxxx011111111111
/// - Application type         : 0bxx01111111111111
/// - Kernel version           : 0bx011111111111111
/// - Handle table size        : 0b0111111111111111
/// - Debugger flags           : 0b1111111111111111
///
/// These are essentially a bit offset subtracted by 1 to create a mask.
/// e.g. The first entry in the above list is simply bit 3 (value 8 -> 0b1000)
/// subtracted by one (7 -> 0b0111)
///
/// An example of a bit layout (using the map physical layout):
/// <example>
/// The MapPhysical type indicates a sequence entry pair of:
///
/// [initial, memory_flags], where:
///
/// initial:
///   bits:
///     7-24: Starting page to map memory at.
///     25  : Indicates if the memory should be mapped as read only.
///
/// memory_flags:
///   bits:
///     7-20 : Number of pages to map
///     21-25: Seems to be reserved (still checked against though)
///     26   : Whether or not the memory being mapped is IO memory, or physical memory
/// </example>
///
class ProcessCapabilities {
public:
using InterruptCapabilities = std::bitset<1024>;
using SyscallCapabilities = std::bitset<128>;

ProcessCapabilities() = default;
ProcessCapabilities(const ProcessCapabilities&) = delete;
ProcessCapabilities(ProcessCapabilities&&) = default;

ProcessCapabilities& operator=(const ProcessCapabilities&) = delete;
ProcessCapabilities& operator=(ProcessCapabilities&&) = default;

/// Initializes this process capabilities instance for a kernel process.
///
/// @param capabilities     The capabilities to parse.
/// @param num_capabilities The number of capabilities to parse.
/// @param page_table       The memory manager to use for handling any mapping-related
///                         operations (such as mapping IO memory, etc).
///
/// @returns RESULT_SUCCESS if this capabilities instance was able to be initialized,
///          otherwise, an error code upon failure.
///
ResultCode InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
Memory::PageTable& page_table);

/// Initializes this process capabilities instance for a userland process.
///
/// @param capabilities     The capabilities to parse.
/// @param num_capabilities The total number of capabilities to parse.
/// @param page_table       The memory manager to use for handling any mapping-related
///                         operations (such as mapping IO memory, etc).
///
/// @returns RESULT_SUCCESS if this capabilities instance was able to be initialized,
///          otherwise, an error code upon failure.
///
ResultCode InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
Memory::PageTable& page_table);

/// Initializes this process capabilities instance for a process that does not
/// have any metadata to parse.
///
/// This is necessary, as we allow running raw executables, and the internal
/// kernel process capabilities also determine what CPU cores the process is
/// allowed to run on, and what priorities are allowed for threads. It also
/// determines the max handle table size, what the program type is, whether or
/// not the process can be debugged, or whether it's possible for a process to
/// forcibly debug another process.
///
/// Given the above, this essentially enables all capabilities across the board
/// for the process. It allows the process to:
///
/// - Run on any core
/// - Use any thread priority
/// - Use the maximum amount of handles a process is allowed to.
/// - Be debuggable
/// - Forcibly debug other processes.
///
/// Note that this is not a behavior that the kernel allows a process to do via
/// a single function like this. This is yuzu-specific behavior to handle
/// executables with no capability descriptors whatsoever to derive behavior from.
/// It being yuzu-specific is why this is also not the default behavior and not
/// done by default in the constructor.
///
void InitializeForMetadatalessProcess();

/// Gets the allowable core mask.
u64 GetCoreMask() const {
return core_mask;
}

/// Gets the allowable priority mask.
u64 GetPriorityMask() const {
return priority_mask;
}

/// Gets the SVC access permission bits.
const SyscallCapabilities& GetServiceCapabilities() const {
return svc_capabilities;
}

/// Gets the valid interrupt bits.
const InterruptCapabilities& GetInterruptCapabilities() const {
return interrupt_capabilities;
}

/// Gets the program type for this process.
ProgramType GetProgramType() const {
return program_type;
}

/// Gets the number of total allowable handles for the process' handle table.
s32 GetHandleTableSize() const {
return handle_table_size;
}

/// Gets the kernel version value.
u32 GetKernelVersion() const {
return kernel_version;
}

/// Whether or not this process can be debugged.
bool IsDebuggable() const {
return is_debuggable;
}

/// Whether or not this process can forcibly debug another
/// process, even if that process is not considered debuggable.
|
||||
bool CanForceDebug() const {
|
||||
return can_force_debug;
|
||||
}
|
||||
|
||||
private:
|
||||
/// Attempts to parse a given sequence of capability descriptors.
|
||||
///
|
||||
/// @param capabilities The sequence of capability descriptors to parse.
|
||||
/// @param num_capabilities The number of descriptors within the given sequence.
|
||||
/// @param page_table The memory manager that will perform any memory
|
||||
/// mapping if necessary.
|
||||
///
|
||||
/// @return RESULT_SUCCESS if no errors occur, otherwise an error code.
|
||||
///
|
||||
ResultCode ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
|
||||
Memory::PageTable& page_table);
|
||||
|
||||
/// Attempts to parse a capability descriptor that is only represented by a
|
||||
/// single flag set.
|
||||
///
|
||||
/// @param set_flags Running set of flags that are used to catch
|
||||
/// flags being initialized more than once when they shouldn't be.
|
||||
/// @param set_svc_bits Running set of bits representing the allowed supervisor calls mask.
|
||||
/// @param flag The flag to attempt to parse.
|
||||
/// @param page_table The memory manager that will perform any memory
|
||||
/// mapping if necessary.
|
||||
///
|
||||
/// @return RESULT_SUCCESS if no errors occurred, otherwise an error code.
|
||||
///
|
||||
ResultCode ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
|
||||
Memory::PageTable& page_table);
|
||||
|
||||
/// Clears the internal state of this process capability instance. Necessary,
|
||||
/// to have a sane starting point due to us allowing running executables without
|
||||
/// configuration metadata. We assume a process is not going to have metadata,
|
||||
/// and if it turns out that the process does, in fact, have metadata, then
|
||||
/// we attempt to parse it. Thus, we need this to reset data members back to
|
||||
/// a good state.
|
||||
///
|
||||
/// DO NOT ever make this a public member function. This isn't an invariant
|
||||
/// anything external should depend upon (and if anything comes to rely on it,
|
||||
/// you should immediately be questioning the design of that thing, not this
|
||||
/// class. If the kernel itself can run without depending on behavior like that,
|
||||
/// then so can yuzu).
|
||||
///
|
||||
void Clear();
|
||||
|
||||
/// Handles flags related to the priority and core number capability flags.
|
||||
ResultCode HandlePriorityCoreNumFlags(u32 flags);
|
||||
|
||||
/// Handles flags related to determining the allowable SVC mask.
|
||||
ResultCode HandleSyscallFlags(u32& set_svc_bits, u32 flags);
|
||||
|
||||
/// Handles flags related to mapping physical memory pages.
|
||||
ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, Memory::PageTable& page_table);
|
||||
|
||||
/// Handles flags related to mapping IO pages.
|
||||
ResultCode HandleMapIOFlags(u32 flags, Memory::PageTable& page_table);
|
||||
|
||||
/// Handles flags related to the interrupt capability flags.
|
||||
ResultCode HandleInterruptFlags(u32 flags);
|
||||
|
||||
/// Handles flags related to the program type.
|
||||
ResultCode HandleProgramTypeFlags(u32 flags);
|
||||
|
||||
/// Handles flags related to the handle table size.
|
||||
ResultCode HandleHandleTableFlags(u32 flags);
|
||||
|
||||
/// Handles flags related to the kernel version capability flags.
|
||||
ResultCode HandleKernelVersionFlags(u32 flags);
|
||||
|
||||
/// Handles flags related to debug-specific capabilities.
|
||||
ResultCode HandleDebugFlags(u32 flags);
|
||||
|
||||
SyscallCapabilities svc_capabilities;
|
||||
InterruptCapabilities interrupt_capabilities;
|
||||
|
||||
u64 core_mask = 0;
|
||||
u64 priority_mask = 0;
|
||||
|
||||
s32 handle_table_size = 0;
|
||||
u32 kernel_version = 0;
|
||||
|
||||
ProgramType program_type = ProgramType::SysModule;
|
||||
|
||||
bool is_debuggable = false;
|
||||
bool can_force_debug = false;
|
||||
};
|
||||
|
||||
} // namespace Kernel
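The mask scheme documented above can be decoded by counting a descriptor's trailing set bits. Below is a minimal, self-contained sketch of that decoding; it is illustrative only, and the enum and function names are hypothetical, not part of this header.

// Illustrative sketch (not part of the original file): decoding the
// capability type from a descriptor by counting its trailing set bits.
#include <cstdint>

enum class CapabilityType { PriorityCoreMask, SyscallMask, MapPhysical, MapIO,
                            Interrupt, ProgramType, KernelVersion, HandleTable,
                            Debug, Invalid };

constexpr CapabilityType GetCapabilityType(std::uint32_t flag) {
    // Count trailing ones: e.g. ...0111 -> 3 -> priority/core mask.
    int ones = 0;
    while ((flag & 1) != 0 && ones < 16) {
        flag >>= 1;
        ++ones;
    }
    switch (ones) {
    case 3:  return CapabilityType::PriorityCoreMask;
    case 4:  return CapabilityType::SyscallMask;
    case 6:  return CapabilityType::MapPhysical;
    case 7:  return CapabilityType::MapIO;
    case 11: return CapabilityType::Interrupt;
    case 13: return CapabilityType::ProgramType;
    case 14: return CapabilityType::KernelVersion;
    case 15: return CapabilityType::HandleTable;
    case 16: return CapabilityType::Debug;
    default: return CapabilityType::Invalid;
    }
}

static_assert(GetCapabilityType(0b0111) == CapabilityType::PriorityCoreMask);
static_assert(GetCapabilityType(0b0111'1111'1111'1111) == CapabilityType::HandleTable);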
54
src/core/hle/kernel/readable_event.cpp
Executable file
@@ -0,0 +1,54 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/thread.h"

namespace Kernel {

ReadableEvent::ReadableEvent(KernelCore& kernel) : SynchronizationObject{kernel} {}
ReadableEvent::~ReadableEvent() = default;

bool ReadableEvent::ShouldWait(const Thread* thread) const {
    return !is_signaled;
}

void ReadableEvent::Acquire(Thread* thread) {
    ASSERT_MSG(IsSignaled(), "object unavailable!");
}

void ReadableEvent::Signal() {
    if (is_signaled) {
        return;
    }

    is_signaled = true;
    SynchronizationObject::Signal();
}

void ReadableEvent::Clear() {
    is_signaled = false;
}

ResultCode ReadableEvent::Reset() {
    KScopedSchedulerLock lock(kernel);
    if (!is_signaled) {
        LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
                  GetObjectId(), GetTypeName(), GetName());
        return ERR_INVALID_STATE;
    }

    Clear();

    return RESULT_SUCCESS;
}

} // namespace Kernel
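The signal/clear/reset contract above is small enough to model standalone. The following sketch is illustrative only (it mirrors the semantics with a plain struct, using a bool return in place of ERR_INVALID_STATE):

// A minimal standalone model of the contract implemented above: Reset()
// succeeds only when the event is currently signaled.
#include <cassert>

struct EventModel {
    bool is_signaled = false;

    void Signal() { is_signaled = true; }
    void Clear() { is_signaled = false; }

    // Returns true on success, false standing in for ERR_INVALID_STATE.
    bool Reset() {
        if (!is_signaled) {
            return false;
        }
        Clear();
        return true;
    }
};

int main() {
    EventModel event;
    assert(!event.Reset()); // resetting an unsignaled event fails
    event.Signal();
    assert(event.Reset());  // resetting a signaled event succeeds
    assert(!event.is_signaled);
}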
57
src/core/hle/kernel/readable_event.h
Executable file
@@ -0,0 +1,57 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/kernel/object.h"
#include "core/hle/kernel/synchronization_object.h"

union ResultCode;

namespace Kernel {

class KernelCore;
class WritableEvent;

class ReadableEvent final : public SynchronizationObject {
    friend class WritableEvent;

public:
    ~ReadableEvent() override;

    std::string GetTypeName() const override {
        return "ReadableEvent";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ReadableEvent;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    bool ShouldWait(const Thread* thread) const override;
    void Acquire(Thread* thread) override;

    /// Unconditionally clears the readable event's state.
    void Clear();

    /// Clears the readable event's state if and only if it
    /// has already been signaled.
    ///
    /// @pre The event must be in a signaled state. If this event
    ///      is in an unsignaled state and this function is called,
    ///      then ERR_INVALID_STATE will be returned.
    ResultCode Reset();

    void Signal() override;

private:
    explicit ReadableEvent(KernelCore& kernel);

    std::string name; ///< Name of event (optional)
};

} // namespace Kernel
73
src/core/hle/kernel/resource_limit.cpp
Executable file
@@ -0,0 +1,73 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/result.h"

namespace Kernel {
namespace {
constexpr std::size_t ResourceTypeToIndex(ResourceType type) {
    return static_cast<std::size_t>(type);
}
} // Anonymous namespace

ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {}
ResourceLimit::~ResourceLimit() = default;

bool ResourceLimit::Reserve(ResourceType resource, s64 amount) {
    return Reserve(resource, amount, 10000000000);
}

bool ResourceLimit::Reserve(ResourceType resource, s64 amount, u64 timeout) {
    const std::size_t index{ResourceTypeToIndex(resource)};

    s64 new_value = current[index] + amount;
    if (new_value > limit[index] && available[index] + amount <= limit[index]) {
        // TODO(bunnei): This is wrong for multicore; we should make the calling thread wait for
        // the given timeout here.
        new_value = current[index] + amount;
    }

    if (new_value <= limit[index]) {
        current[index] = new_value;
        return true;
    }
    return false;
}

void ResourceLimit::Release(ResourceType resource, u64 amount) {
    Release(resource, amount, amount);
}

void ResourceLimit::Release(ResourceType resource, u64 used_amount, u64 available_amount) {
    const std::size_t index{ResourceTypeToIndex(resource)};

    current[index] -= used_amount;
    available[index] -= available_amount;
}

std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
    return std::make_shared<ResourceLimit>(kernel);
}

s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const {
    return limit.at(ResourceTypeToIndex(resource)) - current.at(ResourceTypeToIndex(resource));
}

s64 ResourceLimit::GetMaxResourceValue(ResourceType resource) const {
    return limit.at(ResourceTypeToIndex(resource));
}

ResultCode ResourceLimit::SetLimitValue(ResourceType resource, s64 value) {
    const std::size_t index{ResourceTypeToIndex(resource)};
    if (current[index] <= value) {
        limit[index] = value;
        return RESULT_SUCCESS;
    } else {
        LOG_ERROR(Kernel, "Limit value is too large! resource={}, value={}, index={}",
                  static_cast<u32>(resource), value, index);
        return ERR_INVALID_STATE;
    }
}
} // namespace Kernel
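The core of the accounting above is simple saturating-style bookkeeping: a reservation succeeds only while the running total stays within the configured limit. A minimal standalone model (illustrative only, ignoring the timeout and availability paths):

// Standalone model of the reserve/release arithmetic used above.
#include <cassert>

struct LimitModel {
    long long limit = 0;
    long long current = 0;

    bool Reserve(long long amount) {
        const long long new_value = current + amount;
        if (new_value <= limit) {
            current = new_value;
            return true;
        }
        return false;
    }

    void Release(long long amount) { current -= amount; }
};

int main() {
    LimitModel threads{/*limit=*/3};
    assert(threads.Reserve(2));
    assert(threads.Reserve(1));
    assert(!threads.Reserve(1)); // would exceed the limit of 3
    threads.Release(1);
    assert(threads.Reserve(1));  // headroom is available again
}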
104
src/core/hle/kernel/resource_limit.h
Executable file
@@ -0,0 +1,104 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <memory>

#include "common/common_types.h"
#include "core/hle/kernel/object.h"

union ResultCode;

namespace Kernel {

class KernelCore;

enum class ResourceType : u32 {
    PhysicalMemory,
    Threads,
    Events,
    TransferMemory,
    Sessions,

    // Used as a count, not an actual type.
    ResourceTypeCount
};

constexpr bool IsValidResourceType(ResourceType type) {
    return type < ResourceType::ResourceTypeCount;
}

class ResourceLimit final : public Object {
public:
    explicit ResourceLimit(KernelCore& kernel);
    ~ResourceLimit() override;

    /// Creates a resource limit object.
    static std::shared_ptr<ResourceLimit> Create(KernelCore& kernel);

    std::string GetTypeName() const override {
        return "ResourceLimit";
    }
    std::string GetName() const override {
        return GetTypeName();
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    bool Reserve(ResourceType resource, s64 amount);
    bool Reserve(ResourceType resource, s64 amount, u64 timeout);
    void Release(ResourceType resource, u64 amount);
    void Release(ResourceType resource, u64 used_amount, u64 available_amount);

    /**
     * Gets the current value for the specified resource.
     * @param resource Requested resource type
     * @returns The current value of the resource type
     * @note As implemented, this returns the remaining headroom for the
     *       resource type (limit minus current), not the raw current value.
     */
    s64 GetCurrentResourceValue(ResourceType resource) const;

    /**
     * Gets the max value for the specified resource.
     * @param resource Requested resource type
     * @returns The max value of the resource type
     */
    s64 GetMaxResourceValue(ResourceType resource) const;

    /**
     * Sets the limit value for a given resource type.
     *
     * @param resource The resource type to apply the limit to.
     * @param value    The limit to apply to the given resource type.
     *
     * @return A result code indicating if setting the limit value
     *         was successful or not.
     *
     * @note The supplied limit value *must* be greater than or equal to
     *       the current resource value for the given resource type,
     *       otherwise ERR_INVALID_STATE will be returned.
     */
    ResultCode SetLimitValue(ResourceType resource, s64 value);

private:
    // TODO(Subv): Increment resource limit current values in their respective Kernel::T::Create
    // functions
    //
    // Currently we have no way of distinguishing if a Create was called by the running application,
    // or by a service module. Approach this once we have separated the service modules into their
    // own processes

    using ResourceArray =
        std::array<s64, static_cast<std::size_t>(ResourceType::ResourceTypeCount)>;

    ResourceArray limit{};
    ResourceArray current{};
    ResourceArray available{};
};

} // namespace Kernel
60
src/core/hle/kernel/server_port.cpp
Executable file
@@ -0,0 +1,60 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <tuple>
#include "common/assert.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/thread.h"

namespace Kernel {

ServerPort::ServerPort(KernelCore& kernel) : SynchronizationObject{kernel} {}
ServerPort::~ServerPort() = default;

ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
    if (pending_sessions.empty()) {
        return ERR_NOT_FOUND;
    }

    auto session = std::move(pending_sessions.back());
    pending_sessions.pop_back();
    return MakeResult(std::move(session));
}

void ServerPort::AppendPendingSession(std::shared_ptr<ServerSession> pending_session) {
    pending_sessions.push_back(std::move(pending_session));
}

bool ServerPort::ShouldWait(const Thread* thread) const {
    // If there are no pending sessions, we wait until a new one is added.
    return pending_sessions.empty();
}

void ServerPort::Acquire(Thread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
}

bool ServerPort::IsSignaled() const {
    return !pending_sessions.empty();
}

ServerPort::PortPair ServerPort::CreatePortPair(KernelCore& kernel, u32 max_sessions,
                                                std::string name) {
    std::shared_ptr<ServerPort> server_port = std::make_shared<ServerPort>(kernel);
    std::shared_ptr<ClientPort> client_port = std::make_shared<ClientPort>(kernel);

    server_port->name = name + "_Server";
    client_port->name = name + "_Client";
    client_port->server_port = server_port;
    client_port->max_sessions = max_sessions;
    client_port->active_sessions = 0;

    return std::make_pair(std::move(server_port), std::move(client_port));
}

} // namespace Kernel
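Accept() and AppendPendingSession() above amount to a LIFO queue of pending connections. The following standalone sketch is illustrative only; a string stands in for a session and std::nullopt stands in for ERR_NOT_FOUND:

// Standalone model of the Accept()/AppendPendingSession() queue semantics.
#include <cassert>
#include <optional>
#include <string>
#include <vector>

struct PortModel {
    std::vector<std::string> pending_sessions;

    void AppendPendingSession(std::string session) {
        pending_sessions.push_back(std::move(session));
    }

    std::optional<std::string> Accept() {
        if (pending_sessions.empty()) {
            return std::nullopt; // stands in for ERR_NOT_FOUND
        }
        auto session = std::move(pending_sessions.back());
        pending_sessions.pop_back();
        return session;
    }
};

int main() {
    PortModel port;
    assert(!port.Accept().has_value()); // empty port: nothing to accept
    port.AppendPendingSession("A");
    port.AppendPendingSession("B");
    assert(*port.Accept() == "B");      // back of the vector is taken first
}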
99
src/core/hle/kernel/server_port.h
Executable file
@@ -0,0 +1,99 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"

namespace Kernel {

class ClientPort;
class KernelCore;
class ServerSession;
class SessionRequestHandler;

class ServerPort final : public SynchronizationObject {
public:
    explicit ServerPort(KernelCore& kernel);
    ~ServerPort() override;

    using HLEHandler = std::shared_ptr<SessionRequestHandler>;
    using PortPair = std::pair<std::shared_ptr<ServerPort>, std::shared_ptr<ClientPort>>;

    /**
     * Creates a pair of ServerPort and an associated ClientPort.
     *
     * @param kernel       The kernel instance to create the port pair under.
     * @param max_sessions Maximum number of sessions to the port
     * @param name         Optional name of the ports
     * @return The created port pair
     */
    static PortPair CreatePortPair(KernelCore& kernel, u32 max_sessions,
                                   std::string name = "UnknownPort");

    std::string GetTypeName() const override {
        return "ServerPort";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ServerPort;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /**
     * Accepts a pending incoming connection on this port. If there are no pending sessions,
     * ERR_NOT_FOUND will be returned.
     */
    ResultVal<std::shared_ptr<ServerSession>> Accept();

    /// Whether or not this server port has an HLE handler available.
    bool HasHLEHandler() const {
        return hle_handler != nullptr;
    }

    /// Gets the HLE handler for this port.
    HLEHandler GetHLEHandler() const {
        return hle_handler;
    }

    /**
     * Sets the HLE handler template for the port. ServerSessions created by connecting to this
     * port will inherit a reference to this handler.
     */
    void SetHleHandler(HLEHandler hle_handler_) {
        hle_handler = std::move(hle_handler_);
    }

    /// Appends a ServerSession to the collection of ServerSessions
    /// waiting to be accepted by this port.
    void AppendPendingSession(std::shared_ptr<ServerSession> pending_session);

    bool ShouldWait(const Thread* thread) const override;
    void Acquire(Thread* thread) override;

    bool IsSignaled() const override;

private:
    /// ServerSessions waiting to be accepted by the port
    std::vector<std::shared_ptr<ServerSession>> pending_sessions;

    /// This session's HLE request handler template (optional)
    /// ServerSessions created from this port inherit a reference to this handler.
    HLEHandler hle_handler;

    /// Name of the port (optional)
    std::string name;
};

} // namespace Kernel
189
src/core/hle/kernel/server_session.cpp
Executable file
@@ -0,0 +1,189 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <tuple>
#include <utility>

#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/core_timing.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
#include "core/memory.h"

namespace Kernel {

ServerSession::ServerSession(KernelCore& kernel) : SynchronizationObject{kernel} {}

ServerSession::~ServerSession() {
    kernel.ReleaseServiceThread(service_thread);
}

ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kernel,
                                                                std::shared_ptr<Session> parent,
                                                                std::string name) {
    std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)};

    session->name = std::move(name);
    session->parent = std::move(parent);
    session->service_thread = kernel.CreateServiceThread(session->name);

    return MakeResult(std::move(session));
}

bool ServerSession::ShouldWait(const Thread* thread) const {
    // Closed sessions should never wait; an error will be returned from svcReplyAndReceive.
    if (!parent->Client()) {
        return false;
    }

    // Wait if we have no pending requests, or if we're currently handling a request.
    return pending_requesting_threads.empty() || currently_handling != nullptr;
}

bool ServerSession::IsSignaled() const {
    // Closed sessions are always signaled; an error will be returned from svcReplyAndReceive.
    if (!parent->Client()) {
        return true;
    }

    // Signaled when there is a pending request and no request is currently being handled.
    return !pending_requesting_threads.empty() && currently_handling == nullptr;
}

void ServerSession::Acquire(Thread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
    // We are now handling a request, pop it from the stack.
    // TODO(Subv): What happens if the client endpoint is closed before any requests are made?
    ASSERT(!pending_requesting_threads.empty());
    currently_handling = pending_requesting_threads.back();
    pending_requesting_threads.pop_back();
}

void ServerSession::ClientDisconnected() {
    // We keep a shared pointer to the hle handler to keep it alive throughout
    // the call to ClientDisconnected, as ClientDisconnected invalidates the
    // hle_handler member itself during the course of the function executing.
    std::shared_ptr<SessionRequestHandler> handler = hle_handler;
    if (handler) {
        // Note that after this returns, this server session's hle_handler is
        // invalidated (set to null).
        handler->ClientDisconnected(SharedFrom(this));
    }

    // Clean up the list of client threads with pending requests; they are unneeded now that the
    // client endpoint is closed.
    pending_requesting_threads.clear();
    currently_handling = nullptr;
}

void ServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) {
    domain_request_handlers.push_back(std::move(handler));
}

std::size_t ServerSession::NumDomainRequestHandlers() const {
    return domain_request_handlers.size();
}

ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
    if (!context.HasDomainMessageHeader()) {
        return RESULT_SUCCESS;
    }

    // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
    context.SetDomainRequestHandlers(domain_request_handlers);

    // If there is a DomainMessageHeader, then this is CommandType "Request"
    const auto& domain_message_header = context.GetDomainMessageHeader();
    const u32 object_id{domain_message_header.object_id};
    switch (domain_message_header.command) {
    case IPC::DomainMessageHeader::CommandType::SendMessage:
        if (object_id > domain_request_handlers.size()) {
            LOG_CRITICAL(IPC,
                         "object_id {} is too big! This probably means a recent service call "
                         "to {} needed to return a new interface!",
                         object_id, name);
            UNREACHABLE();
            return RESULT_SUCCESS; // Ignore error if asserts are off
        }
        return domain_request_handlers[object_id - 1]->HandleSyncRequest(context);

    case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
        LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);

        domain_request_handlers[object_id - 1] = nullptr;

        IPC::ResponseBuilder rb{context, 2};
        rb.Push(RESULT_SUCCESS);
        return RESULT_SUCCESS;
    }
    }

    LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
    ASSERT(false);
    return RESULT_SUCCESS;
}

ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread,
                                           Core::Memory::Memory& memory) {
    u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
    auto context =
        std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), std::move(thread));

    context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);

    if (auto strong_ptr = service_thread.lock()) {
        strong_ptr->QueueSyncRequest(*this, std::move(context));
        return RESULT_SUCCESS;
    }

    return RESULT_SUCCESS;
}

ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
    ResultCode result = RESULT_SUCCESS;
    // If the session has been converted to a domain, handle the domain request
    if (IsDomain() && context.HasDomainMessageHeader()) {
        result = HandleDomainSyncRequest(context);
        // If there is no domain header, the regular session handler is used
    } else if (hle_handler != nullptr) {
        // If this ServerSession has an associated HLE handler, forward the request to it.
        result = hle_handler->HandleSyncRequest(context);
    }

    if (convert_to_domain) {
        ASSERT_MSG(IsSession(), "ServerSession is already a domain instance.");
        domain_request_handlers = {hle_handler};
        convert_to_domain = false;
    }

    // Some service requests require the thread to block
    {
        KScopedSchedulerLock lock(kernel);
        if (!context.IsThreadWaiting()) {
            context.GetThread().ResumeFromWait();
            context.GetThread().SetSynchronizationResults(nullptr, result);
        }
    }

    return result;
}

ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
                                            Core::Memory::Memory& memory,
                                            Core::Timing::CoreTiming& core_timing) {
    return QueueSyncRequest(std::move(thread), memory);
}

} // namespace Kernel
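One detail worth calling out from HandleDomainSyncRequest() above: domain object ids are 1-based, so the handler lookup subtracts one before indexing. A tiny standalone sketch of that indexing (illustrative only; ints stand in for request handlers):

// Standalone sketch of the 1-based domain object id to 0-based index mapping.
#include <cassert>
#include <vector>

int main() {
    std::vector<int> handlers{10, 20, 30}; // stand-ins for domain request handlers
    const unsigned object_id = 1;          // smallest valid domain object id
    assert(object_id <= handlers.size());  // bounds check, mirroring the code above
    assert(handlers[object_id - 1] == 10); // object id 1 maps to index 0
}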
173
src/core/hle/kernel/server_session.h
Executable file
@@ -0,0 +1,173 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "common/threadsafe_queue.h"
#include "core/hle/kernel/service_thread.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"

namespace Core::Memory {
class Memory;
}

namespace Core::Timing {
class CoreTiming;
struct EventType;
} // namespace Core::Timing

namespace Kernel {

class HLERequestContext;
class KernelCore;
class Session;
class SessionRequestHandler;
class Thread;

/**
 * Kernel object representing the server endpoint of an IPC session. Sessions are the basic CTR-OS
 * primitive for communication between different processes, and are used to implement service calls
 * to the various system services.
 *
 * To make a service call, the client must write the command header and parameters to the buffer
 * located at offset 0x80 of the TLS (Thread-Local Storage) area, then execute a SendSyncRequest
 * SVC call with its ClientSession handle. The kernel will read the command header, using it to
 * marshall the parameters to the process at the server endpoint of the session.
 * After the server replies to the request, the response is marshalled back to the caller's
 * TLS buffer and control is transferred back to it.
 */
class ServerSession final : public SynchronizationObject {
    friend class ServiceThread;

public:
    explicit ServerSession(KernelCore& kernel);
    ~ServerSession() override;

    friend class Session;

    static ResultVal<std::shared_ptr<ServerSession>> Create(KernelCore& kernel,
                                                            std::shared_ptr<Session> parent,
                                                            std::string name = "Unknown");

    std::string GetTypeName() const override {
        return "ServerSession";
    }

    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ServerSession;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    Session* GetParent() {
        return parent.get();
    }

    const Session* GetParent() const {
        return parent.get();
    }

    bool IsSignaled() const override;

    /**
     * Sets the HLE handler for the session. This handler will be called to service IPC requests
     * instead of the regular IPC machinery. (The regular IPC machinery is currently not
     * implemented.)
     */
    void SetHleHandler(std::shared_ptr<SessionRequestHandler> hle_handler_) {
        hle_handler = std::move(hle_handler_);
    }

    /**
     * Handle a sync request from the emulated application.
     *
     * @param thread      Thread that initiated the request.
     * @param memory      Memory context to handle the sync request under.
     * @param core_timing Core timing context to schedule the request event under.
     *
     * @returns ResultCode from the operation.
     */
    ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
                                 Core::Timing::CoreTiming& core_timing);

    bool ShouldWait(const Thread* thread) const override;

    void Acquire(Thread* thread) override;

    /// Called when a client disconnection occurs.
    void ClientDisconnected();

    /// Adds a new domain request handler to the collection of request handlers within
    /// this ServerSession instance.
    void AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler);

    /// Retrieves the total number of domain request handlers that have been
    /// appended to this ServerSession instance.
    std::size_t NumDomainRequestHandlers() const;

    /// Returns true if the session has been converted to a domain, otherwise false.
    bool IsDomain() const {
        return !IsSession();
    }

    /// Returns true if this session has not been converted to a domain, otherwise false.
    bool IsSession() const {
        return domain_request_handlers.empty();
    }

    /// Converts the session to a domain at the end of the current command
    void ConvertToDomain() {
        convert_to_domain = true;
    }

private:
    /// Queues a sync request from the emulated application.
    ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);

    /// Completes a sync request from the emulated application.
    ResultCode CompleteSyncRequest(HLERequestContext& context);

    /// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing
    /// an object handle.
    ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context);

    /// The parent session, which links to the client endpoint.
    std::shared_ptr<Session> parent;

    /// This session's HLE request handler (applicable when not a domain)
    std::shared_ptr<SessionRequestHandler> hle_handler;

    /// This is the list of domain request handlers (after conversion to a domain)
    std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers;

    /// List of threads that are pending a response after a sync request. This list is processed
    /// in a LIFO manner, thus, the last request will be dispatched first.
    /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
    std::vector<std::shared_ptr<Thread>> pending_requesting_threads;

    /// Thread whose request is currently being handled. A request is considered "handled" when a
    /// response is sent via svcReplyAndReceive.
    /// TODO(Subv): Find a better name for this.
    std::shared_ptr<Thread> currently_handling;

    /// When set to true, converts the session to a domain at the end of the command
    bool convert_to_domain{};

    /// The name of this session (optional)
    std::string name;

    /// Thread to dispatch service requests
    std::weak_ptr<ServiceThread> service_thread;
};

} // namespace Kernel
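The wait/signal relationship implemented by this class (see ShouldWait()/IsSignaled() in the .cpp above) can be summarized in a small standalone model, illustrative only: a server session is signaled exactly when it has pending requests and is not already handling one.

// Standalone sketch of the server session's wait/signal duality.
#include <cassert>

struct SessionModel {
    int pending_requests = 0;
    bool currently_handling = false;

    bool IsSignaled() const {
        return pending_requests > 0 && !currently_handling;
    }

    bool ShouldWait() const {
        return pending_requests == 0 || currently_handling;
    }
};

int main() {
    SessionModel s;
    assert(s.ShouldWait() && !s.IsSignaled()); // nothing to do yet
    s.pending_requests = 1;
    assert(s.IsSignaled());                    // a request is waiting
    s.currently_handling = true;
    assert(s.ShouldWait());                    // busy until the reply is sent
}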
106
src/core/hle/kernel/service_thread.cpp
Executable file
@@ -0,0 +1,106 @@
// Copyright 2020 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <condition_variable>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>
#include <queue>

#include "common/assert.h"
#include "common/scope_exit.h"
#include "common/thread.h"
#include "core/core.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/service_thread.h"
#include "core/hle/lock.h"
#include "video_core/renderer_base.h"

namespace Kernel {

class ServiceThread::Impl final {
public:
    explicit Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name);
    ~Impl();

    void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context);

private:
    std::vector<std::thread> threads;
    std::queue<std::function<void()>> requests;
    std::mutex queue_mutex;
    std::condition_variable condition;
    const std::string service_name;
    bool stop{};
};

ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name)
    : service_name{name} {

    for (std::size_t i = 0; i < num_threads; ++i)
        threads.emplace_back([this, &kernel] {
            Common::SetCurrentThreadName(std::string{"Hle_" + service_name}.c_str());

            // Wait for first request before trying to acquire a render context
            {
                std::unique_lock lock{queue_mutex};
                condition.wait(lock, [this] { return stop || !requests.empty(); });
            }

            kernel.RegisterHostThread();

            while (true) {
                std::function<void()> task;

                {
                    std::unique_lock lock{queue_mutex};
                    condition.wait(lock, [this] { return stop || !requests.empty(); });
                    if (stop || requests.empty()) {
                        return;
                    }
                    task = std::move(requests.front());
                    requests.pop();
                }

                task();
            }
        });
}

void ServiceThread::Impl::QueueSyncRequest(ServerSession& session,
                                           std::shared_ptr<HLERequestContext>&& context) {
    {
        std::unique_lock lock{queue_mutex};
        requests.emplace([session{SharedFrom(&session)}, context{std::move(context)}]() {
            session->CompleteSyncRequest(*context);
            return;
        });
    }
    condition.notify_one();
}

ServiceThread::Impl::~Impl() {
    {
        std::unique_lock lock{queue_mutex};
        stop = true;
    }
    condition.notify_all();
    for (std::thread& thread : threads) {
        thread.join();
    }
}

ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name)
    : impl{std::make_unique<Impl>(kernel, num_threads, name)} {}

ServiceThread::~ServiceThread() = default;

void ServiceThread::QueueSyncRequest(ServerSession& session,
                                     std::shared_ptr<HLERequestContext>&& context) {
    impl->QueueSyncRequest(session, std::move(context));
}

} // namespace Kernel
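The Impl class above is a classic worker-pool idiom: a mutex-guarded queue, a condition variable, and a stop flag checked inside the wait predicate. Here is a minimal standalone version of the same pattern, illustrative only (note one deliberate difference: this variant drains remaining work on shutdown via stop && empty, whereas the code above returns as soon as stop is set or the queue is empty):

#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

int main() {
    std::mutex mutex;
    std::condition_variable condition;
    std::queue<std::function<void()>> requests;
    bool stop = false;

    std::thread worker([&] {
        while (true) {
            std::function<void()> task;
            {
                std::unique_lock lock{mutex};
                condition.wait(lock, [&] { return stop || !requests.empty(); });
                if (stop && requests.empty()) {
                    return; // drain remaining work before exiting
                }
                task = std::move(requests.front());
                requests.pop();
            }
            task(); // run outside the lock so new work can be queued meanwhile
        }
    });

    {
        std::unique_lock lock{mutex};
        requests.emplace([] { std::cout << "request handled\n"; });
    }
    condition.notify_one();

    {
        std::unique_lock lock{mutex};
        stop = true;
    }
    condition.notify_all();
    worker.join();
}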
28
src/core/hle/kernel/service_thread.h
Executable file
@@ -0,0 +1,28 @@
// Copyright 2020 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <string>

namespace Kernel {

class HLERequestContext;
class KernelCore;
class ServerSession;

class ServiceThread final {
public:
    explicit ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name);
    ~ServiceThread();

    void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context);

private:
    class Impl;
    std::unique_ptr<Impl> impl;
};

} // namespace Kernel
41
src/core/hle/kernel/session.cpp
Executable file
@@ -0,0 +1,41 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"

namespace Kernel {

Session::Session(KernelCore& kernel) : SynchronizationObject{kernel} {}
Session::~Session() = default;

Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
    auto session{std::make_shared<Session>(kernel)};
    auto client_session{Kernel::ClientSession::Create(kernel, session, name + "_Client").Unwrap()};
    auto server_session{Kernel::ServerSession::Create(kernel, session, name + "_Server").Unwrap()};

    session->name = std::move(name);
    session->client = client_session;
    session->server = server_session;

    return std::make_pair(std::move(client_session), std::move(server_session));
}

bool Session::ShouldWait(const Thread* thread) const {
    UNIMPLEMENTED();
    return {};
}

bool Session::IsSignaled() const {
    UNIMPLEMENTED();
    return true;
}

void Session::Acquire(Thread* thread) {
    UNIMPLEMENTED();
}

} // namespace Kernel
66
src/core/hle/kernel/session.h
Executable file
@@ -0,0 +1,66 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <string>
#include <utility>

#include "core/hle/kernel/synchronization_object.h"

namespace Kernel {

class ClientSession;
class ServerSession;

/**
 * Parent structure to link the client and server endpoints of a session with their associated
 * client port.
 */
class Session final : public SynchronizationObject {
public:
    explicit Session(KernelCore& kernel);
    ~Session() override;

    using SessionPair = std::pair<std::shared_ptr<ClientSession>, std::shared_ptr<ServerSession>>;

    static SessionPair Create(KernelCore& kernel, std::string name = "Unknown");

    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::Session;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    bool ShouldWait(const Thread* thread) const override;

    bool IsSignaled() const override;

    void Acquire(Thread* thread) override;

    std::shared_ptr<ClientSession> Client() {
        if (auto result{client.lock()}) {
            return result;
        }
        return {};
    }

    std::shared_ptr<ServerSession> Server() {
        if (auto result{server.lock()}) {
            return result;
        }
        return {};
    }

private:
    std::string name;
    std::weak_ptr<ClientSession> client;
    std::weak_ptr<ServerSession> server;
};

} // namespace Kernel
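The endpoints above are held as std::weak_ptr so the parent Session can observe its endpoints without keeping them alive, which avoids a shared_ptr ownership cycle (each endpoint holds a shared_ptr back to the Session). A standalone sketch of why lock() makes this safe, illustrative only:

// Standalone sketch of the weak_ptr endpoint pattern used above.
#include <cassert>
#include <memory>

struct Endpoint {};

int main() {
    std::weak_ptr<Endpoint> client;
    {
        auto alive = std::make_shared<Endpoint>();
        client = alive;
        assert(client.lock() != nullptr); // endpoint still exists
    }
    assert(client.lock() == nullptr);     // endpoint destroyed; no dangling access
}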
57
src/core/hle/kernel/shared_memory.cpp
Executable file
@@ -0,0 +1,57 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "core/core.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/shared_memory.h"

namespace Kernel {

SharedMemory::SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory)
    : Object{kernel}, device_memory{device_memory} {}

SharedMemory::~SharedMemory() = default;

std::shared_ptr<SharedMemory> SharedMemory::Create(
    KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
    Memory::PageLinkedList&& page_list, Memory::MemoryPermission owner_permission,
    Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size,
    std::string name) {

    std::shared_ptr<SharedMemory> shared_memory{
        std::make_shared<SharedMemory>(kernel, device_memory)};

    shared_memory->owner_process = owner_process;
    shared_memory->page_list = std::move(page_list);
    shared_memory->owner_permission = owner_permission;
    shared_memory->user_permission = user_permission;
    shared_memory->physical_address = physical_address;
    shared_memory->size = size;
    shared_memory->name = name;

    return shared_memory;
}

ResultCode SharedMemory::Map(Process& target_process, VAddr address, std::size_t size,
                             Memory::MemoryPermission permissions) {
    const u64 page_count{(size + Memory::PageSize - 1) / Memory::PageSize};

    if (page_list.GetNumPages() != page_count) {
        UNIMPLEMENTED_MSG("Page count does not match");
    }

    const Memory::MemoryPermission expected =
        &target_process == owner_process ? owner_permission : user_permission;

    if (permissions != expected) {
        UNIMPLEMENTED_MSG("Permission does not match");
    }

    return target_process.PageTable().MapPages(address, page_list, Memory::MemoryState::Shared,
                                               permissions);
}

} // namespace Kernel
85
src/core/hle/kernel/shared_memory.h
Executable file
@@ -0,0 +1,85 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <string>

#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/memory/page_linked_list.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/result.h"

namespace Kernel {

class KernelCore;

class SharedMemory final : public Object {
public:
    explicit SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory);
    ~SharedMemory() override;

    static std::shared_ptr<SharedMemory> Create(
        KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
        Memory::PageLinkedList&& page_list, Memory::MemoryPermission owner_permission,
        Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size,
        std::string name);

    std::string GetTypeName() const override {
        return "SharedMemory";
    }

    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::SharedMemory;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /**
     * Maps a shared memory block to an address in the target process' address space
     * @param target_process Process on which to map the memory block
     * @param address Address in system memory to map shared memory block to
     * @param size Size of the shared memory block to map
     * @param permissions Memory block map permissions (specified by SVC field)
     */
    ResultCode Map(Process& target_process, VAddr address, std::size_t size,
                   Memory::MemoryPermission permissions);

    /**
     * Gets a pointer to the shared memory block
     * @param offset Offset from the start of the shared memory block to get pointer
     * @return A pointer to the shared memory block from the specified offset
     */
    u8* GetPointer(std::size_t offset = 0) {
        return device_memory.GetPointer(physical_address + offset);
    }

    /**
     * Gets a pointer to the shared memory block
     * @param offset Offset from the start of the shared memory block to get pointer
     * @return A pointer to the shared memory block from the specified offset
     */
    const u8* GetPointer(std::size_t offset = 0) const {
        return device_memory.GetPointer(physical_address + offset);
    }

private:
    Core::DeviceMemory& device_memory;
    Process* owner_process{};
    Memory::PageLinkedList page_list;
    Memory::MemoryPermission owner_permission{};
    Memory::MemoryPermission user_permission{};
    PAddr physical_address{};
    std::size_t size{};
    std::string name;
};

} // namespace Kernel
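Two details from SharedMemory::Map() in the .cpp above are worth a standalone sketch: the size-to-page-count round-up and the owner/user permission selection. Illustrative only; the 4 KiB page size matches the Switch, and the ints stand in for MemoryPermission values.

#include <cassert>
#include <cstddef>

constexpr std::size_t PageSize = 0x1000; // 4 KiB

constexpr std::size_t PageCount(std::size_t size) {
    return (size + PageSize - 1) / PageSize; // round up to whole pages
}

int main() {
    assert(PageCount(0x1000) == 1);
    assert(PageCount(0x1001) == 2); // any partial page costs a full page

    // The expected permission depends on whether the mapping process owns
    // the block, mirroring the ternary in Map() above.
    const bool is_owner = false;
    const int owner_permission = 3;
    const int user_permission = 1;
    const int expected = is_owner ? owner_permission : user_permission;
    assert(expected == user_permission);
}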
2641
src/core/hle/kernel/svc.cpp
Executable file
File diff suppressed because it is too large
17
src/core/hle/kernel/svc.h
Executable file
@@ -0,0 +1,17 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"

namespace Core {
class System;
}

namespace Kernel::Svc {

void Call(Core::System& system, u32 immediate);

} // namespace Kernel::Svc
68
src/core/hle/kernel/svc_types.h
Executable file
@@ -0,0 +1,68 @@
// Copyright 2020 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_funcs.h"
#include "common/common_types.h"

namespace Kernel::Svc {

enum class MemoryState : u32 {
    Free = 0x00,
    Io = 0x01,
    Static = 0x02,
    Code = 0x03,
    CodeData = 0x04,
    Normal = 0x05,
    Shared = 0x06,
    Alias = 0x07,
    AliasCode = 0x08,
    AliasCodeData = 0x09,
    Ipc = 0x0A,
    Stack = 0x0B,
    ThreadLocal = 0x0C,
    Transfered = 0x0D,
    SharedTransfered = 0x0E,
    SharedCode = 0x0F,
    Inaccessible = 0x10,
    NonSecureIpc = 0x11,
    NonDeviceIpc = 0x12,
    Kernel = 0x13,
    GeneratedCode = 0x14,
    CodeOut = 0x15,
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryState);

enum class MemoryAttribute : u32 {
    Locked = (1 << 0),
    IpcLocked = (1 << 1),
    DeviceShared = (1 << 2),
    Uncached = (1 << 3),
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);

enum class MemoryPermission : u32 {
    None = (0 << 0),
    Read = (1 << 0),
    Write = (1 << 1),
    Execute = (1 << 2),
    ReadWrite = Read | Write,
    ReadExecute = Read | Execute,
    DontCare = (1 << 28),
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission);

struct MemoryInfo {
    u64 addr{};
    u64 size{};
    MemoryState state{};
    MemoryAttribute attr{};
    MemoryPermission perm{};
    u32 ipc_refcount{};
    u32 device_refcount{};
    u32 padding{};
};

} // namespace Kernel::Svc
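DECLARE_ENUM_FLAG_OPERATORS (from common_funcs.h) gives the scoped enums above bitwise operators so flags compose without casts. A standalone sketch of what that expands to, illustrative only (hand-written operators on a hypothetical enum instead of yuzu's macro):

#include <cassert>
#include <cstdint>

enum class Permission : std::uint32_t {
    None = 0,
    Read = 1 << 0,
    Write = 1 << 1,
    ReadWrite = Read | Write, // legal here: enumerators have the underlying type
};

constexpr Permission operator|(Permission a, Permission b) {
    return static_cast<Permission>(static_cast<std::uint32_t>(a) |
                                   static_cast<std::uint32_t>(b));
}

constexpr Permission operator&(Permission a, Permission b) {
    return static_cast<Permission>(static_cast<std::uint32_t>(a) &
                                   static_cast<std::uint32_t>(b));
}

int main() {
    const Permission p = Permission::Read | Permission::Write;
    assert(p == Permission::ReadWrite);
    assert((p & Permission::Read) == Permission::Read); // test a single flag
}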
552
src/core/hle/kernel/svc_wrap.h
Executable file
@@ -0,0 +1,552 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/hle/result.h"

namespace Kernel {

static inline u64 Param(const Core::System& system, int n) {
    return system.CurrentArmInterface().GetReg(n);
}

static inline u32 Param32(const Core::System& system, int n) {
    return static_cast<u32>(system.CurrentArmInterface().GetReg(n));
}

/**
 * Handles an HLE function return for the current ARM userland process.
 * @param system System context
 * @param result Result to return
 */
static inline void FuncReturn(Core::System& system, u64 result) {
    system.CurrentArmInterface().SetReg(0, result);
}

static inline void FuncReturn32(Core::System& system, u32 result) {
    system.CurrentArmInterface().SetReg(0, static_cast<u64>(result));
}
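// Illustrative note (not part of the original file): each SvcWrap64
// specialization below adapts a typed SVC handler to the raw register
// interface. For a hypothetical handler
//
//     ResultCode CloseHandle(Core::System& system, u32 handle);
//
// the dispatcher would instantiate SvcWrap64<CloseHandle>, which reads the
// handle argument from register X0 via Param(), invokes the handler, and
// stores the ResultCode's raw value back to X0 via FuncReturn(). Output
// parameters (pointer arguments) are written back to X1 and onward.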
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
// Function wrappers that return type ResultCode

template <ResultCode func(Core::System&, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0)).raw);
}

template <ResultCode func(Core::System&, u64, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), Param(system, 1)).raw);
}

template <ResultCode func(Core::System&, u32)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
}

template <ResultCode func(Core::System&, u32, u32)>
void SvcWrap64(Core::System& system) {
    FuncReturn(
        system,
        func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw);
}

template <ResultCode func(Core::System&, u32, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
                            Param(system, 2), Param(system, 3))
                           .raw);
}

template <ResultCode func(Core::System&, u32*)>
void SvcWrap64(Core::System& system) {
    u32 param = 0;
    const u32 retval = func(system, &param).raw;
    system.CurrentArmInterface().SetReg(1, param);
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u32*, u32)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1))).raw;
    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u32*, u32*)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    u32 param_2 = 0;
    const u32 retval = func(system, &param_1, &param_2).raw;

    auto& arm_interface = system.CurrentArmInterface();
    arm_interface.SetReg(1, param_1);
    arm_interface.SetReg(2, param_2);

    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u32*, u64)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1)).raw;
    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u32*, u64, u32)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval =
        func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2))).raw;

    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u64*, u32)>
void SvcWrap64(Core::System& system) {
    u64 param_1 = 0;
    const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1))).raw;

    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u64, u32)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1))).raw);
}

template <ResultCode func(Core::System&, u64*, u64)>
void SvcWrap64(Core::System& system) {
    u64 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1)).raw;

    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u64*, u32, u32)>
void SvcWrap64(Core::System& system) {
    u64 param_1 = 0;
    const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1)),
                            static_cast<u32>(Param(system, 2)))
                           .raw;

    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u32, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1)).raw);
}

template <ResultCode func(Core::System&, u32, u32, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
                            static_cast<u32>(Param(system, 1)), Param(system, 2))
                           .raw);
}

template <ResultCode func(Core::System&, u32, u32*, u64*)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    u64 param_2 = 0;
    const ResultCode retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2);

    system.CurrentArmInterface().SetReg(1, param_1);
    system.CurrentArmInterface().SetReg(2, param_2);
    FuncReturn(system, retval.raw);
}

template <ResultCode func(Core::System&, u64, u64, u32, u32)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
                            static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
                           .raw);
}

template <ResultCode func(Core::System&, u64, u64, u32, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
                            static_cast<u32>(Param(system, 2)), Param(system, 3))
                           .raw);
}

template <ResultCode func(Core::System&, u32, u64, u32)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
                            static_cast<u32>(Param(system, 2)))
                           .raw);
}

template <ResultCode func(Core::System&, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), Param(system, 1), Param(system, 2)).raw);
}

template <ResultCode func(Core::System&, u64, u64, u32)>
void SvcWrap64(Core::System& system) {
    FuncReturn(
        system,
        func(system, Param(system, 0), Param(system, 1), static_cast<u32>(Param(system, 2))).raw);
}

template <ResultCode func(Core::System&, u32, u64, u64, u32)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
                            Param(system, 2), static_cast<u32>(Param(system, 3)))
                           .raw);
}

template <ResultCode func(Core::System&, u32, u64, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(
        system,
        func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2)).raw);
}

template <ResultCode func(Core::System&, u32*, u64, u64, s64)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2)),
                            static_cast<s64>(Param(system, 3)))
                           .raw;

    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u64, u64, u32, s64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
                            static_cast<u32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
                           .raw);
}

template <ResultCode func(Core::System&, u64*, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
    u64 param_1 = 0;
    const u32 retval =
        func(system, &param_1, Param(system, 1), Param(system, 2), Param(system, 3)).raw;

    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u32*, u64, u64, u64, u32, s32)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2), Param(system, 3),
                            static_cast<u32>(Param(system, 4)), static_cast<s32>(Param(system, 5)))
                           .raw;

    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u32*, u64, u64, u32)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2),
                            static_cast<u32>(Param(system, 3)))
                           .raw;

    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, Handle*, u64, u32, u32)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2)),
                            static_cast<u32>(Param(system, 3)))
                           .raw;

    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u64, u32, s32, s64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
                            static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
                           .raw);
}

template <ResultCode func(Core::System&, u64, u32, s32, s32)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
                            static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
                           .raw);
}

////////////////////////////////////////////////////////////////////////////////////////////////////
// Function wrappers that return type u32

template <u32 func(Core::System&)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system));
}

////////////////////////////////////////////////////////////////////////////////////////////////////
// Function wrappers that return type u64

template <u64 func(Core::System&)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system));
}

////////////////////////////////////////////////////////////////////////////////////////////////////
// Function wrappers that return type void

template <void func(Core::System&)>
void SvcWrap64(Core::System& system) {
    func(system);
}

template <void func(Core::System&, u32)>
void SvcWrap64(Core::System& system) {
    func(system, static_cast<u32>(Param(system, 0)));
}

template <void func(Core::System&, u32, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
    func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2),
         Param(system, 3));
}

template <void func(Core::System&, s64)>
void SvcWrap64(Core::System& system) {
    func(system, static_cast<s64>(Param(system, 0)));
}

template <void func(Core::System&, u64, s32)>
void SvcWrap64(Core::System& system) {
    func(system, Param(system, 0), static_cast<s32>(Param(system, 1)));
}

template <void func(Core::System&, u64, u64)>
void SvcWrap64(Core::System& system) {
    func(system, Param(system, 0), Param(system, 1));
}

template <void func(Core::System&, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
    func(system, Param(system, 0), Param(system, 1), Param(system, 2));
}

template <void func(Core::System&, u32, u64, u64)>
void SvcWrap64(Core::System& system) {
    func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2));
}

// Used by QueryMemory32, ArbitrateLock32
template <ResultCode func(Core::System&, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
    FuncReturn32(system,
                 func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2)).raw);
}

// Used by Break32
template <void func(Core::System&, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
    func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2));
}

// Used by ExitProcess32, ExitThread32
template <void func(Core::System&)>
void SvcWrap32(Core::System& system) {
    func(system);
}

// Used by GetCurrentProcessorNumber32
template <u32 func(Core::System&)>
void SvcWrap32(Core::System& system) {
    FuncReturn32(system, func(system));
}

// Used by SleepThread32
template <void func(Core::System&, u32, u32)>
void SvcWrap32(Core::System& system) {
    func(system, Param32(system, 0), Param32(system, 1));
}

// Used by CreateThread32
template <ResultCode func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
void SvcWrap32(Core::System& system) {
    Handle param_1 = 0;

    const u32 retval = func(system, &param_1, Param32(system, 0), Param32(system, 1),
                            Param32(system, 2), Param32(system, 3), Param32(system, 4))
                           .raw;

    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

// Used by GetInfo32
template <ResultCode func(Core::System&, u32*, u32*, u32, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
    u32 param_1 = 0;
    u32 param_2 = 0;

    const u32 retval = func(system, &param_1, &param_2, Param32(system, 0), Param32(system, 1),
                            Param32(system, 2), Param32(system, 3))
                           .raw;

    system.CurrentArmInterface().SetReg(1, param_1);
    system.CurrentArmInterface().SetReg(2, param_2);
    FuncReturn(system, retval);
}

// Used by GetThreadPriority32, ConnectToNamedPort32
template <ResultCode func(Core::System&, u32*, u32)>
void SvcWrap32(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, Param32(system, 1)).raw;
    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

// Used by GetThreadId32
template <ResultCode func(Core::System&, u32*, u32*, u32)>
void SvcWrap32(Core::System& system) {
    u32 param_1 = 0;
    u32 param_2 = 0;

    const u32 retval = func(system, &param_1, &param_2, Param32(system, 1)).raw;
    system.CurrentArmInterface().SetReg(1, param_1);
    system.CurrentArmInterface().SetReg(2, param_2);
    FuncReturn(system, retval);
}

// Used by GetSystemTick32
template <void func(Core::System&, u32*, u32*)>
void SvcWrap32(Core::System& system) {
    u32 param_1 = 0;
    u32 param_2 = 0;

    func(system, &param_1, &param_2);
    system.CurrentArmInterface().SetReg(0, param_1);
    system.CurrentArmInterface().SetReg(1, param_2);
}
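Note how the 32-bit wrappers split 64-bit quantities across two registers; GetSystemTick32 above returns its value through registers 0 and 1. A hedged sketch of a handler with that shape (the function body and the CoreTiming accessor are assumptions for illustration, not part of the diff):

static void SampleGetTick32(Core::System& system, u32* time_low, u32* time_high) {
    const u64 ticks = system.CoreTiming().GetClockTicks(); // assumed accessor
    *time_low = static_cast<u32>(ticks);                   // register 0 on return
    *time_high = static_cast<u32>(ticks >> 32);            // register 1 on return
}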

// Used by CreateEvent32
template <ResultCode func(Core::System&, Handle*, Handle*)>
void SvcWrap32(Core::System& system) {
    Handle param_1 = 0;
    Handle param_2 = 0;

    const u32 retval = func(system, &param_1, &param_2).raw;
    system.CurrentArmInterface().SetReg(1, param_1);
    system.CurrentArmInterface().SetReg(2, param_2);
    FuncReturn(system, retval);
}

// Used by GetThreadId32
template <ResultCode func(Core::System&, Handle, u32*, u32*, u32*)>
void SvcWrap32(Core::System& system) {
    u32 param_1 = 0;
    u32 param_2 = 0;
    u32 param_3 = 0;

    const u32 retval = func(system, Param32(system, 2), &param_1, &param_2, &param_3).raw;
    system.CurrentArmInterface().SetReg(1, param_1);
    system.CurrentArmInterface().SetReg(2, param_2);
    system.CurrentArmInterface().SetReg(3, param_3);
    FuncReturn(system, retval);
}

// Used by SignalProcessWideKey32
template <void func(Core::System&, u32, s32)>
void SvcWrap32(Core::System& system) {
    func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1)));
}

// Used by SetThreadPriority32
template <ResultCode func(Core::System&, Handle, u32)>
void SvcWrap32(Core::System& system) {
    const u32 retval =
        func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw;
    FuncReturn(system, retval);
}

// Used by SetThreadCoreMask32
template <ResultCode func(Core::System&, Handle, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
    const u32 retval =
        func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
             static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
            .raw;
    FuncReturn(system, retval);
}

// Used by WaitProcessWideKeyAtomic32
template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)>
void SvcWrap32(Core::System& system) {
    const u32 retval =
        func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
             static_cast<Handle>(Param(system, 2)), static_cast<u32>(Param(system, 3)),
             static_cast<u32>(Param(system, 4)))
            .raw;
    FuncReturn(system, retval);
}

// Used by WaitForAddress32
template <ResultCode func(Core::System&, u32, u32, s32, u32, u32)>
void SvcWrap32(Core::System& system) {
    const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
                            static_cast<u32>(Param(system, 1)), static_cast<s32>(Param(system, 2)),
                            static_cast<u32>(Param(system, 3)), static_cast<u32>(Param(system, 4)))
                           .raw;
    FuncReturn(system, retval);
}

// Used by SignalToAddress32
template <ResultCode func(Core::System&, u32, u32, s32, s32)>
void SvcWrap32(Core::System& system) {
    const u32 retval =
        func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
             static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
            .raw;
    FuncReturn(system, retval);
}

// Used by SendSyncRequest32, ArbitrateUnlock32
template <ResultCode func(Core::System&, u32)>
void SvcWrap32(Core::System& system) {
    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
}

// Used by CreateTransferMemory32
template <ResultCode func(Core::System&, Handle*, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
    Handle handle = 0;
    const u32 retval =
        func(system, &handle, Param32(system, 1), Param32(system, 2), Param32(system, 3)).raw;
    system.CurrentArmInterface().SetReg(1, handle);
    FuncReturn(system, retval);
}

// Used by WaitSynchronization32
template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)>
void SvcWrap32(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
                            Param32(system, 3), &param_1)
                           .raw;
    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

} // namespace Kernel
116
src/core/hle/kernel/synchronization.cpp
Executable file
@@ -0,0 +1,116 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/synchronization.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"

namespace Kernel {

Synchronization::Synchronization(Core::System& system) : system{system} {}

void Synchronization::SignalObject(SynchronizationObject& obj) const {
    auto& kernel = system.Kernel();
    KScopedSchedulerLock lock(kernel);
    if (obj.IsSignaled()) {
        for (auto thread : obj.GetWaitingThreads()) {
            if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
                if (thread->GetStatus() != ThreadStatus::WaitHLEEvent) {
                    ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);
                    ASSERT(thread->IsWaitingSync());
                }
                thread->SetSynchronizationResults(&obj, RESULT_SUCCESS);
                thread->ResumeFromWait();
            }
        }
        obj.ClearWaitingThreads();
    }
}

std::pair<ResultCode, Handle> Synchronization::WaitFor(
    std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
    auto& kernel = system.Kernel();
    auto* const thread = kernel.CurrentScheduler()->GetCurrentThread();
    Handle event_handle = InvalidHandle;
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
        const auto itr =
            std::find_if(sync_objects.begin(), sync_objects.end(),
                         [thread](const std::shared_ptr<SynchronizationObject>& object) {
                             return object->IsSignaled();
                         });

        if (itr != sync_objects.end()) {
            // We found a ready object, acquire it and set the result value
            SynchronizationObject* object = itr->get();
            object->Acquire(thread);
            const u32 index = static_cast<u32>(std::distance(sync_objects.begin(), itr));
            lock.CancelSleep();
            return {RESULT_SUCCESS, index};
        }

        if (nano_seconds == 0) {
            lock.CancelSleep();
            return {RESULT_TIMEOUT, InvalidHandle};
        }

        if (thread->IsPendingTermination()) {
            lock.CancelSleep();
            return {ERR_THREAD_TERMINATING, InvalidHandle};
        }

        if (thread->IsSyncCancelled()) {
            thread->SetSyncCancelled(false);
            lock.CancelSleep();
            return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle};
        }

        for (auto& object : sync_objects) {
            object->AddWaitingThread(SharedFrom(thread));
        }

        thread->SetSynchronizationObjects(&sync_objects);
        thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
        thread->SetStatus(ThreadStatus::WaitSynch);
        thread->SetWaitingSync(true);
    }
    thread->SetWaitingSync(false);

    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }

    {
        KScopedSchedulerLock lock(kernel);
        ResultCode signaling_result = thread->GetSignalingResult();
        SynchronizationObject* signaling_object = thread->GetSignalingObject();
        thread->SetSynchronizationObjects(nullptr);
        auto shared_thread = SharedFrom(thread);
        for (auto& obj : sync_objects) {
            obj->RemoveWaitingThread(shared_thread);
        }
        if (signaling_object != nullptr) {
            const auto itr = std::find_if(
                sync_objects.begin(), sync_objects.end(),
                [signaling_object](const std::shared_ptr<SynchronizationObject>& object) {
                    return object.get() == signaling_object;
                });
            ASSERT(itr != sync_objects.end());
            signaling_object->Acquire(thread);
            const u32 index = static_cast<u32>(std::distance(sync_objects.begin(), itr));
            return {signaling_result, index};
        }
        return {signaling_result, -1};
    }
}

} // namespace Kernel
44
src/core/hle/kernel/synchronization.h
Executable file
@@ -0,0 +1,44 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <utility>
#include <vector>

#include "core/hle/kernel/object.h"
#include "core/hle/result.h"

namespace Core {
class System;
} // namespace Core

namespace Kernel {

class SynchronizationObject;

/**
 * The 'Synchronization' class is an interface for handling synchronization methods
 * used by Synchronization objects and synchronization SVCs. This centralizes processing of
 * such synchronization requests.
 */
class Synchronization {
public:
    explicit Synchronization(Core::System& system);

    /// Signals a synchronization object, waking up all its waiting threads
    void SignalObject(SynchronizationObject& obj) const;

    /// Checks whether waiting on any of the sync_objects is necessary. If not, it
    /// returns Success and the handle index of the already-signaled sync object.
    /// Otherwise, the current thread is put to sleep until nano_seconds elapse or
    /// one of the synchronization objects is signaled.
    std::pair<ResultCode, Handle> WaitFor(
        std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds);

private:
    Core::System& system;
};
} // namespace Kernel
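A brief usage sketch of WaitFor from an SVC-level caller; the event object and timeout below are placeholders, not part of the diff:

// Wait up to 1ms on a single object.
std::vector<std::shared_ptr<Kernel::SynchronizationObject>> objects{some_event};
const auto [result, index] = system.Kernel().Synchronization().WaitFor(objects, 1'000'000);
if (result == RESULT_SUCCESS) {
    // objects[index] was signaled and has been acquired for the calling thread.
}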
49
src/core/hle/kernel/synchronization_object.cpp
Executable file
@@ -0,0 +1,49 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/synchronization.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/kernel/thread.h"

namespace Kernel {

SynchronizationObject::SynchronizationObject(KernelCore& kernel) : Object{kernel} {}
SynchronizationObject::~SynchronizationObject() = default;

void SynchronizationObject::Signal() {
    kernel.Synchronization().SignalObject(*this);
}

void SynchronizationObject::AddWaitingThread(std::shared_ptr<Thread> thread) {
    auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
    if (itr == waiting_threads.end())
        waiting_threads.push_back(std::move(thread));
}

void SynchronizationObject::RemoveWaitingThread(std::shared_ptr<Thread> thread) {
    auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
    // If a thread passed multiple handles to the same object,
    // the kernel might attempt to remove the thread from the object's
    // waiting threads list multiple times.
    if (itr != waiting_threads.end())
        waiting_threads.erase(itr);
}

void SynchronizationObject::ClearWaitingThreads() {
    waiting_threads.clear();
}

const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const {
    return waiting_threads;
}

} // namespace Kernel
77
src/core/hle/kernel/synchronization_object.h
Executable file
@@ -0,0 +1,77 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <atomic>
#include <memory>
#include <vector>

#include "core/hle/kernel/object.h"

namespace Kernel {

class KernelCore;
class Synchronization;
class Thread;

/// Class that represents a Kernel object that a thread can be waiting on
class SynchronizationObject : public Object {
public:
    explicit SynchronizationObject(KernelCore& kernel);
    ~SynchronizationObject() override;

    /**
     * Check if the specified thread should wait until the object is available
     * @param thread The thread about which we're deciding.
     * @return True if the current thread should wait due to this object being unavailable
     */
    virtual bool ShouldWait(const Thread* thread) const = 0;

    /// Acquire/lock the object for the specified thread if it is available
    virtual void Acquire(Thread* thread) = 0;

    /// Signal this object
    virtual void Signal();

    virtual bool IsSignaled() const {
        return is_signaled;
    }

    /**
     * Add a thread to wait on this object
     * @param thread Pointer to thread to add
     */
    void AddWaitingThread(std::shared_ptr<Thread> thread);

    /**
     * Removes a thread from waiting on this object (e.g. if it was resumed already)
     * @param thread Pointer to thread to remove
     */
    void RemoveWaitingThread(std::shared_ptr<Thread> thread);

    /// Get a const reference to the waiting threads list for debug use
    const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const;

    void ClearWaitingThreads();

protected:
    std::atomic_bool is_signaled{}; // Tells if this sync object is signaled

private:
    /// Threads waiting for this object to become available
    std::vector<std::shared_ptr<Thread>> waiting_threads;
};

// Specialization of DynamicObjectCast for SynchronizationObjects
template <>
inline std::shared_ptr<SynchronizationObject> DynamicObjectCast<SynchronizationObject>(
    std::shared_ptr<Object> object) {
    if (object != nullptr && object->IsWaitable()) {
        return std::static_pointer_cast<SynchronizationObject>(object);
    }
    return nullptr;
}

} // namespace Kernel
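A short usage sketch of the DynamicObjectCast specialization above, e.g. when resolving a guest handle; handle_table, handle, and the lookup call are placeholders:

std::shared_ptr<Kernel::Object> object = handle_table.GetGeneric(handle); // assumed lookup
auto waitable = Kernel::DynamicObjectCast<Kernel::SynchronizationObject>(object);
if (!waitable) {
    // The handle resolved to an object that cannot be waited on.
}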
478
src/core/hle/kernel/thread.cpp
Executable file
@@ -0,0 +1,478 @@
// Copyright 2014 Citra Emulator Project / PPSSPP Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <cinttypes>
#include <optional>
#include <vector>

#include "common/assert.h"
#include "common/common_types.h"
#include "common/fiber.h"
#include "common/logging/log.h"
#include "common/thread_queue_list.h"
#include "core/core.h"
#include "core/cpu_manager.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"

#ifdef ARCHITECTURE_x86_64
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
#endif

namespace Kernel {

bool Thread::ShouldWait(const Thread* thread) const {
    return status != ThreadStatus::Dead;
}

bool Thread::IsSignaled() const {
    return status == ThreadStatus::Dead;
}

void Thread::Acquire(Thread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
}

Thread::Thread(KernelCore& kernel) : SynchronizationObject{kernel} {}
Thread::~Thread() = default;

void Thread::Stop() {
    {
        KScopedSchedulerLock lock(kernel);
        SetStatus(ThreadStatus::Dead);
        Signal();
        kernel.GlobalHandleTable().Close(global_handle);

        if (owner_process) {
            owner_process->UnregisterThread(this);

            // Mark the TLS slot in the thread's page as free.
            owner_process->FreeTLSRegion(tls_address);
        }
        has_exited = true;
    }
    global_handle = 0;
}

void Thread::ResumeFromWait() {
    KScopedSchedulerLock lock(kernel);
    switch (status) {
    case ThreadStatus::Paused:
    case ThreadStatus::WaitSynch:
    case ThreadStatus::WaitHLEEvent:
    case ThreadStatus::WaitSleep:
    case ThreadStatus::WaitIPC:
    case ThreadStatus::WaitMutex:
    case ThreadStatus::WaitCondVar:
    case ThreadStatus::WaitArb:
    case ThreadStatus::Dormant:
        break;

    case ThreadStatus::Ready:
        // The thread's wakeup callback must have already been cleared when the thread was first
        // awoken.
        ASSERT(hle_callback == nullptr);
        // If the thread is waiting on multiple wait objects, it might be awoken more than once
        // before actually resuming. We can ignore subsequent wakeups if the thread status has
        // already been set to ThreadStatus::Ready.
        return;
    case ThreadStatus::Dead:
        // This should never happen, as threads must complete before being stopped.
        DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
                         GetObjectId());
        return;
    }

    SetStatus(ThreadStatus::Ready);
}

void Thread::OnWakeUp() {
    KScopedSchedulerLock lock(kernel);
    SetStatus(ThreadStatus::Ready);
}

ResultCode Thread::Start() {
    KScopedSchedulerLock lock(kernel);
    SetStatus(ThreadStatus::Ready);
    return RESULT_SUCCESS;
}

void Thread::CancelWait() {
    KScopedSchedulerLock lock(kernel);
    if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
        is_sync_cancelled = true;
        return;
    }
    // TODO(Blinkhawk): Implement cancel of server session
    is_sync_cancelled = false;
    SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
    SetStatus(ThreadStatus::Ready);
}

static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
                                 u32 entry_point, u32 arg) {
    context = {};
    context.cpu_registers[0] = arg;
    context.cpu_registers[15] = entry_point;
    context.cpu_registers[13] = stack_top;
}

static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top,
                                 VAddr entry_point, u64 arg) {
    context = {};
    context.cpu_registers[0] = arg;
    context.pc = entry_point;
    context.sp = stack_top;
    // TODO(merry): Perform a hardware test to determine the below value.
    context.fpcr = 0;
}

std::shared_ptr<Common::Fiber>& Thread::GetHostContext() {
    return host_context;
}

ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
                                                  std::string name, VAddr entry_point, u32 priority,
                                                  u64 arg, s32 processor_id, VAddr stack_top,
                                                  Process* owner_process) {
    std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
    return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
                  owner_process, std::move(init_func), init_func_parameter);
}

ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
                                                  std::string name, VAddr entry_point, u32 priority,
                                                  u64 arg, s32 processor_id, VAddr stack_top,
                                                  Process* owner_process,
                                                  std::function<void(void*)>&& thread_start_func,
                                                  void* thread_start_parameter) {
    auto& kernel = system.Kernel();
    // Check if priority is in range. Lowest priority -> highest priority id.
    if (priority > THREADPRIO_LOWEST && ((type_flags & THREADTYPE_IDLE) == 0)) {
        LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority);
        return ERR_INVALID_THREAD_PRIORITY;
    }

    if (processor_id > THREADPROCESSORID_MAX) {
        LOG_ERROR(Kernel_SVC, "Invalid processor id: {}", processor_id);
        return ERR_INVALID_PROCESSOR_ID;
    }

    if (owner_process) {
        if (!system.Memory().IsValidVirtualAddress(*owner_process, entry_point)) {
            LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
            // TODO (bunnei): Find the correct error code to use here
            return RESULT_UNKNOWN;
        }
    }

    std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);

    thread->thread_id = kernel.CreateNewThreadID();
    thread->status = ThreadStatus::Dormant;
    thread->entry_point = entry_point;
    thread->stack_top = stack_top;
    thread->disable_count = 1;
    thread->tpidr_el0 = 0;
    thread->nominal_priority = thread->current_priority = priority;
    thread->schedule_count = -1;
    thread->last_scheduled_tick = 0;
    thread->processor_id = processor_id;
    thread->ideal_core = processor_id;
    thread->affinity_mask.SetAffinity(processor_id, true);
    thread->wait_objects = nullptr;
    thread->mutex_wait_address = 0;
    thread->condvar_wait_address = 0;
    thread->wait_handle = 0;
    thread->name = std::move(name);
    thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
    thread->owner_process = owner_process;
    thread->type = type_flags;
    if ((type_flags & THREADTYPE_IDLE) == 0) {
        auto& scheduler = kernel.GlobalSchedulerContext();
        scheduler.AddThread(thread);
    }
    if (owner_process) {
        thread->tls_address = thread->owner_process->CreateTLSRegion();
        thread->owner_process->RegisterThread(thread.get());
    } else {
        thread->tls_address = 0;
    }

    // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
    // to initialize the context
    if ((type_flags & THREADTYPE_HLE) == 0) {
        ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top),
                             static_cast<u32>(entry_point), static_cast<u32>(arg));
        ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
    }
    thread->host_context =
        std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);

    return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
}
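To make the parameter order above concrete, a hedged example of creating a user thread; entry_point, stack_top, and owner_process are placeholders, not part of the diff:

auto result = Kernel::Thread::Create(system, Kernel::THREADTYPE_USER, "guest.main", entry_point,
                                     Kernel::THREADPRIO_DEFAULT, /*arg=*/0,
                                     Kernel::THREADPROCESSORID_0, stack_top, &owner_process);
if (result.Succeeded()) {
    (*result)->Start(); // moves the thread from Dormant to Ready
}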

void Thread::SetPriority(u32 priority) {
    KScopedSchedulerLock lock(kernel);
    ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
               "Invalid priority value.");
    nominal_priority = priority;
    UpdatePriority();
}

void Thread::SetSynchronizationResults(SynchronizationObject* object, ResultCode result) {
    signaling_object = object;
    signaling_result = result;
}

s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const {
    ASSERT_MSG(!wait_objects->empty(), "Thread is not waiting for anything");
    const auto match = std::find(wait_objects->rbegin(), wait_objects->rend(), object);
    return static_cast<s32>(std::distance(match, wait_objects->rend()) - 1);
}

VAddr Thread::GetCommandBufferAddress() const {
    // Offset from the start of TLS at which the IPC command buffer begins.
    constexpr u64 command_header_offset = 0x80;
    return GetTLSAddress() + command_header_offset;
}

void Thread::SetStatus(ThreadStatus new_status) {
    if (new_status == status) {
        return;
    }

    switch (new_status) {
    case ThreadStatus::Ready:
        SetSchedulingStatus(ThreadSchedStatus::Runnable);
        break;
    case ThreadStatus::Dormant:
        SetSchedulingStatus(ThreadSchedStatus::None);
        break;
    case ThreadStatus::Dead:
        SetSchedulingStatus(ThreadSchedStatus::Exited);
        break;
    default:
        SetSchedulingStatus(ThreadSchedStatus::Paused);
        break;
    }

    status = new_status;
}

void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) {
    if (thread->lock_owner.get() == this) {
        // If the thread is already waiting for this thread to release the mutex, ensure that the
        // waiters list is consistent and return without doing anything.
        const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
        ASSERT(iter != wait_mutex_threads.end());
        return;
    }

    // A thread can't wait on two different mutexes at the same time.
    ASSERT(thread->lock_owner == nullptr);

    // Ensure that the thread is not already in the list of mutex waiters
    const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
    ASSERT(iter == wait_mutex_threads.end());

    // Keep the list in an ordered fashion
    const auto insertion_point = std::find_if(
        wait_mutex_threads.begin(), wait_mutex_threads.end(),
        [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
    wait_mutex_threads.insert(insertion_point, thread);
    thread->lock_owner = SharedFrom(this);

    UpdatePriority();
}

void Thread::RemoveMutexWaiter(std::shared_ptr<Thread> thread) {
    ASSERT(thread->lock_owner.get() == this);

    // Ensure that the thread is in the list of mutex waiters
    const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
    ASSERT(iter != wait_mutex_threads.end());

    wait_mutex_threads.erase(iter);

    thread->lock_owner = nullptr;
    UpdatePriority();
}

void Thread::UpdatePriority() {
    // If any of the threads waiting on the mutex have a higher priority
    // (taking into account priority inheritance), then this thread inherits
    // that thread's priority.
    u32 new_priority = nominal_priority;
    if (!wait_mutex_threads.empty()) {
        if (wait_mutex_threads.front()->current_priority < new_priority) {
            new_priority = wait_mutex_threads.front()->current_priority;
        }
    }

    if (new_priority == current_priority) {
        return;
    }

    if (GetStatus() == ThreadStatus::WaitCondVar) {
        owner_process->RemoveConditionVariableThread(SharedFrom(this));
    }

    SetCurrentPriority(new_priority);

    if (GetStatus() == ThreadStatus::WaitCondVar) {
        owner_process->InsertConditionVariableThread(SharedFrom(this));
    }

    if (!lock_owner) {
        return;
    }

    // Ensure that the thread is within the correct location in the waiting list.
    auto old_owner = lock_owner;
    lock_owner->RemoveMutexWaiter(SharedFrom(this));
    old_owner->AddMutexWaiter(SharedFrom(this));

    // Recursively update the priority of the thread that depends on the priority of this one.
    lock_owner->UpdatePriority();
}
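A short worked example of the inheritance rule UpdatePriority implements (lower value = higher priority):

// Thread A (nominal 44) holds a mutex; thread B (priority 30) blocks on it:
//   A->AddMutexWaiter(B)  // wait_mutex_threads is kept sorted, so B is at the front
//   A->UpdatePriority()   // new_priority = min(44, 30) = 30; A now runs at 30
// Once B is removed via RemoveMutexWaiter, A falls back to its nominal priority of 44.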

bool Thread::AllSynchronizationObjectsReady() const {
    return std::none_of(wait_objects->begin(), wait_objects->end(),
                        [this](const std::shared_ptr<SynchronizationObject>& object) {
                            return object->ShouldWait(this);
                        });
}

bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
    ASSERT(hle_callback);
    return hle_callback(std::move(thread));
}

ResultCode Thread::SetActivity(ThreadActivity value) {
    KScopedSchedulerLock lock(kernel);

    auto sched_status = GetSchedulingStatus();

    if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) {
        return ERR_INVALID_STATE;
    }

    if (IsPendingTermination()) {
        return RESULT_SUCCESS;
    }

    if (value == ThreadActivity::Paused) {
        if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) != 0) {
            return ERR_INVALID_STATE;
        }
        AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
    } else {
        if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) == 0) {
            return ERR_INVALID_STATE;
        }
        RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
    }
    return RESULT_SUCCESS;
}

ResultCode Thread::Sleep(s64 nanoseconds) {
    Handle event_handle{};
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
        SetStatus(ThreadStatus::WaitSleep);
    }

    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }
    return RESULT_SUCCESS;
}

void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
    const u32 old_state = scheduling_state;
    pausing_state |= static_cast<u32>(flag);
    const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
    scheduling_state = base_scheduling | pausing_state;
    KScheduler::OnThreadStateChanged(kernel, this, old_state);
}

void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
    const u32 old_state = scheduling_state;
    pausing_state &= ~static_cast<u32>(flag);
    const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
    scheduling_state = base_scheduling | pausing_state;
    KScheduler::OnThreadStateChanged(kernel, this, old_state);
}

void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
    const u32 old_state = scheduling_state;
    scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
                       static_cast<u32>(new_status);
    KScheduler::OnThreadStateChanged(kernel, this, old_state);
}

void Thread::SetCurrentPriority(u32 new_priority) {
    const u32 old_priority = std::exchange(current_priority, new_priority);
    KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
                                        old_priority);
}

ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
    KScopedSchedulerLock lock(kernel);
    const auto HighestSetCore = [](u64 mask, u32 max_cores) {
        for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
            if (((mask >> core) & 1) != 0) {
                return core;
            }
        }
        return -1;
    };

    const bool use_override = affinity_override_count != 0;
    if (new_core == THREADPROCESSORID_DONT_UPDATE) {
        new_core = use_override ? ideal_core_override : ideal_core;
        if ((new_affinity_mask & (1ULL << new_core)) == 0) {
            LOG_ERROR(Kernel, "New affinity mask is incorrect! new_core={}, new_affinity_mask={}",
                      new_core, new_affinity_mask);
            return ERR_INVALID_COMBINATION;
        }
    }
    if (use_override) {
        ideal_core_override = new_core;
    } else {
        const auto old_affinity_mask = affinity_mask;
        affinity_mask.SetAffinityMask(new_affinity_mask);
        ideal_core = new_core;
        if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) {
            const s32 old_core = processor_id;
            if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) {
                if (static_cast<s32>(ideal_core) < 0) {
                    processor_id = HighestSetCore(affinity_mask.GetAffinityMask(),
                                                  Core::Hardware::NUM_CPU_CORES);
                } else {
                    processor_id = ideal_core;
                }
            }
            KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core);
        }
    }
    return RESULT_SUCCESS;
}

} // namespace Kernel
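The affinity mask is one bit per core (bit N set = the thread may run on core N). Two worked cases of SetCoreAndAffinityMask, following the logic above:

// new_core = THREADPROCESSORID_DONT_UPDATE, new_affinity_mask = 0b0110 (cores 1-2):
//   the current ideal core is kept; ERR_INVALID_COMBINATION is returned unless the
//   kept core's bit is set in the new mask.
// new_core = 3, new_affinity_mask = 0b1000: the thread may only run on core 3; if it
//   was resident on another core, processor_id is migrated to core 3.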
731
src/core/hle/kernel/thread.h
Executable file
@@ -0,0 +1,731 @@
// Copyright 2014 Citra Emulator Project / PPSSPP Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <functional>
#include <string>
#include <utility>
#include <vector>

#include "common/common_types.h"
#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
#include "core/hle/kernel/k_affinity_mask.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"

namespace Common {
class Fiber;
}

namespace Core {
class ARM_Interface;
class System;
} // namespace Core

namespace Kernel {

class GlobalSchedulerContext;
class KernelCore;
class Process;
class KScheduler;

enum ThreadPriority : u32 {
    THREADPRIO_HIGHEST = 0,            ///< Highest thread priority
    THREADPRIO_MAX_CORE_MIGRATION = 2, ///< Highest priority for a core migration
    THREADPRIO_USERLAND_MAX = 24,      ///< Highest thread priority for userland apps
    THREADPRIO_DEFAULT = 44,           ///< Default thread priority for userland apps
    THREADPRIO_LOWEST = 63,            ///< Lowest thread priority
    THREADPRIO_COUNT = 64,             ///< Total number of possible thread priorities.
};

enum ThreadType : u32 {
    THREADTYPE_USER = 0x1,
    THREADTYPE_KERNEL = 0x2,
    THREADTYPE_HLE = 0x4,
    THREADTYPE_IDLE = 0x8,
    THREADTYPE_SUSPEND = 0x10,
};

enum ThreadProcessorId : s32 {
    /// Indicates that no particular processor core is preferred.
    THREADPROCESSORID_DONT_CARE = -1,

    /// Run thread on the ideal core specified by the process.
    THREADPROCESSORID_IDEAL = -2,

    /// Indicates that the preferred processor ID shouldn't be updated in
    /// a core mask setting operation.
    THREADPROCESSORID_DONT_UPDATE = -3,

    THREADPROCESSORID_0 = 0,   ///< Run thread on core 0
    THREADPROCESSORID_1 = 1,   ///< Run thread on core 1
    THREADPROCESSORID_2 = 2,   ///< Run thread on core 2
    THREADPROCESSORID_3 = 3,   ///< Run thread on core 3
    THREADPROCESSORID_MAX = 4, ///< Processor ID must be less than this

    /// Allowed CPU mask
    THREADPROCESSORID_DEFAULT_MASK = (1 << THREADPROCESSORID_0) | (1 << THREADPROCESSORID_1) |
                                     (1 << THREADPROCESSORID_2) | (1 << THREADPROCESSORID_3)
};

enum class ThreadStatus {
    Ready,        ///< Ready to run
    Paused,       ///< Paused by SetThreadActivity or debug
    WaitHLEEvent, ///< Waiting for an HLE event to finish
    WaitSleep,    ///< Waiting due to a SleepThread SVC
    WaitIPC,      ///< Waiting for the reply from an IPC request
    WaitSynch,    ///< Waiting due to WaitSynchronization
    WaitMutex,    ///< Waiting due to an ArbitrateLock svc
    WaitCondVar,  ///< Waiting due to a WaitProcessWideKey svc
    WaitArb,      ///< Waiting due to a SignalToAddress/WaitForAddress svc
    Dormant,      ///< Created but not yet made ready
    Dead          ///< Run to completion, or forcefully terminated
};

enum class ThreadWakeupReason {
    Signal, // The thread was woken up by WakeupAllWaitingThreads due to an object signal.
    Timeout // The thread was woken up due to a wait timeout.
};

enum class ThreadActivity : u32 {
    Normal = 0,
    Paused = 1,
};

enum class ThreadSchedStatus : u32 {
    None = 0,
    Paused = 1,
    Runnable = 2,
    Exited = 3,
};

enum class ThreadSchedFlags : u32 {
    ProcessPauseFlag = 1 << 4,
    ThreadPauseFlag = 1 << 5,
    ProcessDebugPauseFlag = 1 << 6,
    KernelInitPauseFlag = 1 << 8,
};

enum class ThreadSchedMasks : u32 {
    LowMask = 0x000f,
    HighMask = 0xfff0,
    ForcePauseMask = 0x0070,
};
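These masks describe how a thread's scheduling_state word is packed: LowMask holds the ThreadSchedStatus and HighMask holds the pause flags, as AddSchedulingFlag/SetSchedulingStatus in thread.cpp show. For example:

// A runnable thread paused via SetThreadActivity:
//   scheduling_state = static_cast<u32>(ThreadSchedStatus::Runnable)        // 0x0002
//                    | static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag); // 0x0020
// ForcePauseMask (0x0070) tests the process/thread/debug pause bits in one step.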
|
||||
class Thread final : public SynchronizationObject {
|
||||
public:
|
||||
explicit Thread(KernelCore& kernel);
|
||||
~Thread() override;
|
||||
|
||||
using MutexWaitingThreads = std::vector<std::shared_ptr<Thread>>;
|
||||
|
||||
using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
|
||||
using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
|
||||
|
||||
using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>;
|
||||
|
||||
using HLECallback = std::function<bool(std::shared_ptr<Thread> thread)>;
|
||||
|
||||
/**
|
||||
* Creates and returns a new thread. The new thread is immediately scheduled
|
||||
* @param system The instance of the whole system
|
||||
* @param name The friendly name desired for the thread
|
||||
* @param entry_point The address at which the thread should start execution
|
||||
* @param priority The thread's priority
|
||||
* @param arg User data to pass to the thread
|
||||
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
|
||||
* @param stack_top The address of the thread's stack top
|
||||
* @param owner_process The parent process for the thread, if null, it's a kernel thread
|
||||
* @return A shared pointer to the newly created thread
|
||||
*/
|
||||
static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
|
||||
std::string name, VAddr entry_point,
|
||||
u32 priority, u64 arg, s32 processor_id,
|
||||
VAddr stack_top, Process* owner_process);
|
||||
|
||||
/**
|
||||
* Creates and returns a new thread. The new thread is immediately scheduled
|
||||
* @param system The instance of the whole system
|
||||
* @param name The friendly name desired for the thread
|
||||
* @param entry_point The address at which the thread should start execution
|
||||
* @param priority The thread's priority
|
||||
* @param arg User data to pass to the thread
|
||||
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
|
||||
* @param stack_top The address of the thread's stack top
|
||||
* @param owner_process The parent process for the thread, if null, it's a kernel thread
|
||||
* @param thread_start_func The function where the host context will start.
|
||||
* @param thread_start_parameter The parameter which will passed to host context on init
|
||||
* @return A shared pointer to the newly created thread
|
||||
*/
|
||||
static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
|
||||
std::string name, VAddr entry_point,
|
||||
u32 priority, u64 arg, s32 processor_id,
|
||||
VAddr stack_top, Process* owner_process,
|
||||
std::function<void(void*)>&& thread_start_func,
|
||||
void* thread_start_parameter);
|
||||
|
||||
    std::string GetName() const override {
        return name;
    }

    void SetName(std::string new_name) {
        name = std::move(new_name);
    }

    std::string GetTypeName() const override {
        return "Thread";
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    bool ShouldWait(const Thread* thread) const override;
    void Acquire(Thread* thread) override;
    bool IsSignaled() const override;

    /**
     * Gets the thread's current priority
     * @return The current thread's priority
     */
    u32 GetPriority() const {
        return current_priority;
    }

    /**
     * Gets the thread's nominal priority.
     * @return The current thread's nominal priority.
     */
    u32 GetNominalPriority() const {
        return nominal_priority;
    }

    /**
     * Sets the thread's current priority
     * @param priority The new priority
     */
    void SetPriority(u32 priority);

    /// Adds a thread to the list of threads that are waiting for a lock held by this thread.
    void AddMutexWaiter(std::shared_ptr<Thread> thread);

    /// Removes a thread from the list of threads that are waiting for a lock held by this thread.
    void RemoveMutexWaiter(std::shared_ptr<Thread> thread);

    /// Recalculates the current priority taking into account priority inheritance.
    void UpdatePriority();

    /// Changes the core that the thread is running on, or scheduled to run on.
    ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);

    /**
     * Gets the thread's thread ID
     * @return The thread's ID
     */
    u64 GetThreadID() const {
        return thread_id;
    }

    /// Resumes a thread from waiting
    void ResumeFromWait();

    void OnWakeUp();

    ResultCode Start();

    /// Cancels a waiting operation that this thread may or may not be within.
    ///
    /// When the thread is within a waiting state, this will set the thread's
    /// waiting result to signal a canceled wait. The function will then resume
    /// this thread.
    ///
    void CancelWait();

    void SetSynchronizationResults(SynchronizationObject* object, ResultCode result);

    SynchronizationObject* GetSignalingObject() const {
        return signaling_object;
    }

    ResultCode GetSignalingResult() const {
        return signaling_result;
    }

    /**
     * Retrieves the index that this particular object occupies in the list of objects
     * that the thread passed to WaitSynchronization, starting the search from the last element.
     *
     * It is used to set the output index of WaitSynchronization when the thread is awakened.
     *
     * When a thread wakes up due to an object signal, the kernel will use the index of the last
     * matching object in the wait objects list in case of having multiple instances of the same
     * object in the list.
     *
     * @param object Object to query the index of.
     */
    s32 GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const;

    /**
     * Stops a thread, invalidating it from further use
     */
    void Stop();

    /**
     * Returns the Thread Local Storage address of this thread
     * @return VAddr of the thread's TLS
     */
    VAddr GetTLSAddress() const {
        return tls_address;
    }

    /**
     * Returns the value of the TPIDR_EL0 Read/Write system register for this thread.
     * @return The value of the TPIDR_EL0 register.
     */
    u64 GetTPIDR_EL0() const {
        return tpidr_el0;
    }

    /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
    void SetTPIDR_EL0(u64 value) {
        tpidr_el0 = value;
    }

    /**
     * Returns the address of this thread's command buffer, located in its TLS.
     * @return VAddr of the thread's command buffer.
     */
    VAddr GetCommandBufferAddress() const;

    ThreadContext32& GetContext32() {
        return context_32;
    }

    const ThreadContext32& GetContext32() const {
        return context_32;
    }

    ThreadContext64& GetContext64() {
        return context_64;
    }

    const ThreadContext64& GetContext64() const {
        return context_64;
    }

    bool IsHLEThread() const {
        return (type & THREADTYPE_HLE) != 0;
    }

    bool IsSuspendThread() const {
        return (type & THREADTYPE_SUSPEND) != 0;
    }

    bool IsIdleThread() const {
        return (type & THREADTYPE_IDLE) != 0;
    }

    bool WasRunning() const {
        return was_running;
    }

    void SetWasRunning(bool value) {
        was_running = value;
    }

    std::shared_ptr<Common::Fiber>& GetHostContext();

    ThreadStatus GetStatus() const {
        return status;
    }

    void SetStatus(ThreadStatus new_status);

    s64 GetLastScheduledTick() const {
        return this->last_scheduled_tick;
    }

    void SetLastScheduledTick(s64 tick) {
        this->last_scheduled_tick = tick;
    }

    u64 GetTotalCPUTimeTicks() const {
        return total_cpu_time_ticks;
    }

    void UpdateCPUTimeTicks(u64 ticks) {
        total_cpu_time_ticks += ticks;
    }

    s32 GetProcessorID() const {
        return processor_id;
    }

    s32 GetActiveCore() const {
        return GetProcessorID();
    }

    void SetProcessorID(s32 new_core) {
        processor_id = new_core;
    }

    void SetActiveCore(s32 new_core) {
        processor_id = new_core;
    }

    Process* GetOwnerProcess() {
        return owner_process;
    }

    const Process* GetOwnerProcess() const {
        return owner_process;
    }

    const ThreadSynchronizationObjects& GetSynchronizationObjects() const {
        return *wait_objects;
    }

    void SetSynchronizationObjects(ThreadSynchronizationObjects* objects) {
        wait_objects = objects;
    }

    void ClearSynchronizationObjects() {
        for (const auto& waiting_object : *wait_objects) {
            waiting_object->RemoveWaitingThread(SharedFrom(this));
        }
        wait_objects->clear();
    }

    /// Determines whether all the objects this thread is waiting on are ready.
    bool AllSynchronizationObjectsReady() const;

    const MutexWaitingThreads& GetMutexWaitingThreads() const {
        return wait_mutex_threads;
    }

    Thread* GetLockOwner() const {
        return lock_owner.get();
    }

    void SetLockOwner(std::shared_ptr<Thread> owner) {
        lock_owner = std::move(owner);
    }

    VAddr GetCondVarWaitAddress() const {
        return condvar_wait_address;
    }

    void SetCondVarWaitAddress(VAddr address) {
        condvar_wait_address = address;
    }

    VAddr GetMutexWaitAddress() const {
        return mutex_wait_address;
    }

    void SetMutexWaitAddress(VAddr address) {
        mutex_wait_address = address;
    }

    Handle GetWaitHandle() const {
        return wait_handle;
    }

    void SetWaitHandle(Handle handle) {
        wait_handle = handle;
    }

    VAddr GetArbiterWaitAddress() const {
        return arb_wait_address;
    }

    void SetArbiterWaitAddress(VAddr address) {
        arb_wait_address = address;
    }

    bool HasHLECallback() const {
        return hle_callback != nullptr;
    }

    void SetHLECallback(HLECallback callback) {
        hle_callback = std::move(callback);
    }

    void SetHLETimeEvent(Handle time_event) {
        hle_time_event = time_event;
    }

    void SetHLESyncObject(SynchronizationObject* object) {
        hle_object = object;
    }

    Handle GetHLETimeEvent() const {
        return hle_time_event;
    }

    SynchronizationObject* GetHLESyncObject() const {
        return hle_object;
    }

    void InvalidateHLECallback() {
        SetHLECallback(nullptr);
    }

    bool InvokeHLECallback(std::shared_ptr<Thread> thread);

    u32 GetIdealCore() const {
        return ideal_core;
    }

    const KAffinityMask& GetAffinityMask() const {
        return affinity_mask;
    }

    ResultCode SetActivity(ThreadActivity value);

    /// Sleeps this thread for the given amount of nanoseconds.
    ResultCode Sleep(s64 nanoseconds);

    s64 GetYieldScheduleCount() const {
        return this->schedule_count;
    }

    void SetYieldScheduleCount(s64 count) {
        this->schedule_count = count;
    }

    ThreadSchedStatus GetSchedulingStatus() const {
        return static_cast<ThreadSchedStatus>(scheduling_state &
                                              static_cast<u32>(ThreadSchedMasks::LowMask));
    }

    bool IsRunnable() const {
        return scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable);
    }

    bool IsRunning() const {
        return is_running;
    }

    void SetIsRunning(bool value) {
        is_running = value;
    }

    bool IsSyncCancelled() const {
        return is_sync_cancelled;
    }

    void SetSyncCancelled(bool value) {
        is_sync_cancelled = value;
    }

    Handle GetGlobalHandle() const {
        return global_handle;
    }

    bool IsWaitingForArbitration() const {
        return waiting_for_arbitration;
    }

    void WaitForArbitration(bool set) {
        waiting_for_arbitration = set;
    }

    bool IsWaitingSync() const {
        return is_waiting_on_sync;
    }

    void SetWaitingSync(bool is_waiting) {
        is_waiting_on_sync = is_waiting;
    }

    bool IsPendingTermination() const {
        return will_be_terminated || GetSchedulingStatus() == ThreadSchedStatus::Exited;
    }

    bool IsPaused() const {
        return pausing_state != 0;
    }

    bool IsContinuousOnSVC() const {
        return is_continuous_on_svc;
    }

    void SetContinuousOnSVC(bool is_continuous) {
        is_continuous_on_svc = is_continuous;
    }

    bool IsPhantomMode() const {
        return is_phantom_mode;
    }

    void SetPhantomMode(bool phantom) {
        is_phantom_mode = phantom;
    }

    bool HasExited() const {
        return has_exited;
    }

    class QueueEntry {
    public:
        constexpr QueueEntry() = default;

        constexpr void Initialize() {
            this->prev = nullptr;
            this->next = nullptr;
        }

        constexpr Thread* GetPrev() const {
            return this->prev;
        }
        constexpr Thread* GetNext() const {
            return this->next;
        }
        constexpr void SetPrev(Thread* thread) {
            this->prev = thread;
        }
        constexpr void SetNext(Thread* thread) {
            this->next = thread;
        }

    private:
        Thread* prev{};
        Thread* next{};
    };

    QueueEntry& GetPriorityQueueEntry(s32 core) {
        return this->per_core_priority_queue_entry[core];
    }

    const QueueEntry& GetPriorityQueueEntry(s32 core) const {
        return this->per_core_priority_queue_entry[core];
    }

    s32 GetDisableDispatchCount() const {
        return disable_count;
    }

    void DisableDispatch() {
        ASSERT(GetDisableDispatchCount() >= 0);
        disable_count++;
    }

    void EnableDispatch() {
        ASSERT(GetDisableDispatchCount() > 0);
        disable_count--;
    }

private:
    friend class GlobalSchedulerContext;
    friend class KScheduler;
    friend class Process;

    void SetSchedulingStatus(ThreadSchedStatus new_status);
    void AddSchedulingFlag(ThreadSchedFlags flag);
    void RemoveSchedulingFlag(ThreadSchedFlags flag);

    void SetCurrentPriority(u32 new_priority);

    Common::SpinLock context_guard{};
    ThreadContext32 context_32{};
    ThreadContext64 context_64{};
    std::shared_ptr<Common::Fiber> host_context{};

    ThreadStatus status = ThreadStatus::Dormant;
    u32 scheduling_state = 0;

    u64 thread_id = 0;

    VAddr entry_point = 0;
    VAddr stack_top = 0;
    std::atomic_int disable_count = 0;

    ThreadType type;

    /// Nominal thread priority, as set by the emulated application.
    /// The nominal priority is the thread priority without priority
    /// inheritance taken into account.
    u32 nominal_priority = 0;

    /// Current thread priority. This may change over the course of the
    /// thread's lifetime in order to facilitate priority inheritance.
    u32 current_priority = 0;

    u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
    s64 schedule_count{};
    s64 last_scheduled_tick{};

    s32 processor_id = 0;

    VAddr tls_address = 0; ///< Virtual address of the Thread Local Storage of the thread
    u64 tpidr_el0 = 0;     ///< TPIDR_EL0 read/write system register.

    /// Process that owns this thread
    Process* owner_process;

    /// Objects that the thread is waiting on, in the same order as they were
    /// passed to WaitSynchronization.
    ThreadSynchronizationObjects* wait_objects;

    SynchronizationObject* signaling_object;
    ResultCode signaling_result{RESULT_SUCCESS};

    /// List of threads that are waiting for a mutex that is held by this thread.
    MutexWaitingThreads wait_mutex_threads;

    /// Thread that owns the lock that this thread is waiting for.
    std::shared_ptr<Thread> lock_owner;

    /// If waiting on a ConditionVariable, this is the ConditionVariable address
    VAddr condvar_wait_address = 0;
    /// If waiting on a Mutex, this is the mutex address
    VAddr mutex_wait_address = 0;
    /// The handle used to wait for the mutex.
    Handle wait_handle = 0;

    /// If waiting for an AddressArbiter, this is the address being waited on.
    VAddr arb_wait_address{0};
    bool waiting_for_arbitration{};

    /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
    Handle global_handle = 0;

    /// Callback for HLE Events
    HLECallback hle_callback;
    Handle hle_time_event;
    SynchronizationObject* hle_object;

    KScheduler* scheduler = nullptr;

    std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};

    u32 ideal_core{0xFFFFFFFF};
    KAffinityMask affinity_mask{};

    s32 ideal_core_override = -1;
    u32 affinity_override_count = 0;

    u32 pausing_state = 0;
    bool is_running = false;
    bool is_waiting_on_sync = false;
    bool is_sync_cancelled = false;

    bool is_continuous_on_svc = false;

    bool will_be_terminated = false;
    bool is_phantom_mode = false;
    bool has_exited = false;

    bool was_running = false;

    std::string name;
};

} // namespace Kernel
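
The nested QueueEntry type above gives each Thread one intrusive doubly-linked-list node per CPU core, so a scheduler can keep the same thread on every core's priority queue without extra allocations. A minimal sketch of walking such a queue follows; ForEachThreadOnCore and the head pointer are hypothetical and only illustrate the pattern, they are not part of these sources:

// Hypothetical helper: visit every thread linked on one core's queue via the
// intrusive Thread::QueueEntry links. 'head' would be maintained by a scheduler.
template <typename F>
void ForEachThreadOnCore(Kernel::Thread* head, s32 core, F&& func) {
    for (Kernel::Thread* it = head; it != nullptr;
         it = it->GetPriorityQueueEntry(core).GetNext()) {
        func(*it); // no heap traversal state; the links live inside each Thread
    }
}
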
70
src/core/hle/kernel/time_manager.cpp
Executable file
@@ -0,0 +1,70 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"

namespace Kernel {

TimeManager::TimeManager(Core::System& system_) : system{system_} {
    time_manager_event_type = Core::Timing::CreateEvent(
        "Kernel::TimeManagerCallback",
        [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
            const KScopedSchedulerLock lock(system.Kernel());
            const auto proper_handle = static_cast<Handle>(thread_handle);

            std::shared_ptr<Thread> thread;
            {
                std::lock_guard lock{mutex};
                if (cancelled_events[proper_handle]) {
                    return;
                }
                thread = system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
            }

            // Thread can be null if the process has already exited
            if (thread) {
                thread->OnWakeUp();
            }
        });
}

void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) {
    std::lock_guard lock{mutex};
    ASSERT(timetask);
    event_handle = timetask->GetGlobalHandle();
    if (nanoseconds > 0) {
        ASSERT(timetask->GetStatus() != ThreadStatus::Ready);
        ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex);
        system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
                                          time_manager_event_type, event_handle);
    } else {
        event_handle = InvalidHandle;
    }
    cancelled_events[event_handle] = false;
}

void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
    std::lock_guard lock{mutex};
    if (event_handle == InvalidHandle) {
        return;
    }
    system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle);
    cancelled_events[event_handle] = true;
}

void TimeManager::CancelTimeEvent(Thread* time_task) {
    // UnscheduleTimeEvent takes the mutex itself; locking here as well would
    // deadlock on the non-recursive std::mutex.
    const Handle event_handle = time_task->GetGlobalHandle();
    UnscheduleTimeEvent(event_handle);
}

} // namespace Kernel
49
src/core/hle/kernel/time_manager.h
Executable file
@@ -0,0 +1,49 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <mutex>
#include <unordered_map>

#include "core/hle/kernel/object.h"

namespace Core {
class System;
} // namespace Core

namespace Core::Timing {
struct EventType;
} // namespace Core::Timing

namespace Kernel {

class Thread;

/**
 * The `TimeManager` takes care of scheduling time events on threads and executes their TimeUp
 * method when the event is triggered.
 */
class TimeManager {
public:
    explicit TimeManager(Core::System& system);

    /// Schedules a time event on the `timetask` thread that will expire in `nanoseconds`.
    /// Writes a valid handle to `event_handle` if the event was scheduled, InvalidHandle otherwise.
    void ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds);

    /// Unschedules an existing time event
    void UnscheduleTimeEvent(Handle event_handle);

    void CancelTimeEvent(Thread* time_task);

private:
    Core::System& system;
    std::shared_ptr<Core::Timing::EventType> time_manager_event_type;
    std::unordered_map<Handle, bool> cancelled_events;
    std::mutex mutex;
};

} // namespace Kernel
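
A short usage sketch of this API; WaitWithTimeout is a hypothetical caller that only illustrates the intended schedule/cancel pairing used by the wait paths:

// Hypothetical caller pairing ScheduleTimeEvent with CancelTimeEvent.
void WaitWithTimeout(Kernel::TimeManager& time_manager, Kernel::Thread* thread, s64 timeout_ns) {
    Kernel::Handle timeout_event{};
    time_manager.ScheduleTimeEvent(timeout_event, thread, timeout_ns);

    // ... the thread blocks here until it is signaled or the timeout fires ...

    // Cancel on the way out; UnscheduleTimeEvent ignores InvalidHandle, which
    // covers the timeout_ns <= 0 case where nothing was actually scheduled.
    time_manager.CancelTimeEvent(thread);
}
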
49
src/core/hle/kernel/transfer_memory.cpp
Executable file
@@ -0,0 +1,49 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/transfer_memory.h"
#include "core/hle/result.h"
#include "core/memory.h"

namespace Kernel {

TransferMemory::TransferMemory(KernelCore& kernel, Core::Memory::Memory& memory)
    : Object{kernel}, memory{memory} {}

TransferMemory::~TransferMemory() {
    // Release memory region when transfer memory is destroyed
    Reset();
}

std::shared_ptr<TransferMemory> TransferMemory::Create(KernelCore& kernel,
                                                       Core::Memory::Memory& memory,
                                                       VAddr base_address, std::size_t size,
                                                       Memory::MemoryPermission permissions) {
    std::shared_ptr<TransferMemory> transfer_memory{
        std::make_shared<TransferMemory>(kernel, memory)};

    transfer_memory->base_address = base_address;
    transfer_memory->size = size;
    transfer_memory->owner_permissions = permissions;
    transfer_memory->owner_process = kernel.CurrentProcess();

    return transfer_memory;
}

const u8* TransferMemory::GetPointer() const {
    return memory.GetPointer(base_address);
}

ResultCode TransferMemory::Reserve() {
    return owner_process->PageTable().ReserveTransferMemory(base_address, size, owner_permissions);
}

ResultCode TransferMemory::Reset() {
    return owner_process->PageTable().ResetTransferMemory(base_address, size);
}

} // namespace Kernel
91
src/core/hle/kernel/transfer_memory.h
Executable file
@@ -0,0 +1,91 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>

#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/physical_memory.h"

union ResultCode;

namespace Core::Memory {
class Memory;
}

namespace Kernel {

class KernelCore;
class Process;

/// Defines the interface for transfer memory objects.
///
/// Transfer memory is typically used for the purpose of
/// transferring memory between separate process instances,
/// thus the name.
///
class TransferMemory final : public Object {
public:
    explicit TransferMemory(KernelCore& kernel, Core::Memory::Memory& memory);
    ~TransferMemory() override;

    static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory;

    static std::shared_ptr<TransferMemory> Create(KernelCore& kernel, Core::Memory::Memory& memory,
                                                  VAddr base_address, std::size_t size,
                                                  Memory::MemoryPermission permissions);

    TransferMemory(const TransferMemory&) = delete;
    TransferMemory& operator=(const TransferMemory&) = delete;

    TransferMemory(TransferMemory&&) = delete;
    TransferMemory& operator=(TransferMemory&&) = delete;

    std::string GetTypeName() const override {
        return "TransferMemory";
    }

    std::string GetName() const override {
        return GetTypeName();
    }

    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /// Gets a pointer to the backing block of this instance.
    const u8* GetPointer() const;

    /// Gets the size of the memory backing this instance in bytes.
    constexpr std::size_t GetSize() const {
        return size;
    }

    /// Reserves the region to be used for the transfer memory, called after the transfer memory is
    /// created.
    ResultCode Reserve();

    /// Resets the region previously used for the transfer memory, called after the transfer memory
    /// is closed.
    ResultCode Reset();

private:
    /// The base address for the memory managed by this instance.
    VAddr base_address{};

    /// Size of the memory, in bytes, that this instance manages.
    std::size_t size{};

    /// The memory permissions that are applied to this instance.
    Memory::MemoryPermission owner_permissions{};

    /// The process that this transfer memory instance was created under.
    Process* owner_process{};

    Core::Memory::Memory& memory;
};

} // namespace Kernel
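
The Reserve/Reset pair brackets the object's lifetime, as the comments above describe: Reserve after creation, Reset (via the destructor) after close. A minimal sketch of that lifecycle; MakeReservedTransferMemory is a hypothetical helper, not part of these sources:

// Create a transfer memory object and reserve its guest region; destruction
// later resets the region via ~TransferMemory().
std::shared_ptr<Kernel::TransferMemory> MakeReservedTransferMemory(
    Kernel::KernelCore& kernel, Core::Memory::Memory& memory, VAddr address, std::size_t size,
    Kernel::Memory::MemoryPermission perms) {
    auto transfer_memory = Kernel::TransferMemory::Create(kernel, memory, address, size, perms);
    if (transfer_memory->Reserve().IsError()) {
        return nullptr; // reservation failed; the caller decides how to report it
    }
    return transfer_memory;
}
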
45
src/core/hle/kernel/writable_event.cpp
Executable file
@@ -0,0 +1,45 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include "common/assert.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/writable_event.h"

namespace Kernel {

WritableEvent::WritableEvent(KernelCore& kernel) : Object{kernel} {}
WritableEvent::~WritableEvent() = default;

EventPair WritableEvent::CreateEventPair(KernelCore& kernel, std::string name) {
    std::shared_ptr<WritableEvent> writable_event(new WritableEvent(kernel));
    std::shared_ptr<ReadableEvent> readable_event(new ReadableEvent(kernel));

    writable_event->name = name + ":Writable";
    writable_event->readable = readable_event;
    readable_event->name = name + ":Readable";

    return {std::move(readable_event), std::move(writable_event)};
}

std::shared_ptr<ReadableEvent> WritableEvent::GetReadableEvent() const {
    return readable;
}

void WritableEvent::Signal() {
    readable->Signal();
}

void WritableEvent::Clear() {
    readable->Clear();
}

bool WritableEvent::IsSignaled() const {
    return readable->IsSignaled();
}

} // namespace Kernel
59
src/core/hle/kernel/writable_event.h
Executable file
@@ -0,0 +1,59 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>

#include "core/hle/kernel/object.h"

namespace Kernel {

class KernelCore;
class ReadableEvent;
class WritableEvent;

struct EventPair {
    std::shared_ptr<ReadableEvent> readable;
    std::shared_ptr<WritableEvent> writable;
};

class WritableEvent final : public Object {
public:
    ~WritableEvent() override;

    /**
     * Creates an event
     * @param kernel The kernel instance to create this event under.
     * @param name Optional name of event
     */
    static EventPair CreateEventPair(KernelCore& kernel, std::string name = "Unknown");

    std::string GetTypeName() const override {
        return "WritableEvent";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::WritableEvent;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    std::shared_ptr<ReadableEvent> GetReadableEvent() const;

    void Signal();
    void Clear();
    bool IsSignaled() const;

private:
    explicit WritableEvent(KernelCore& kernel);

    std::shared_ptr<ReadableEvent> readable;

    std::string name; ///< Name of event (optional)
};

} // namespace Kernel
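
A brief sketch of the writable/readable split; SignalCompletion is hypothetical and only shows how the two halves of an EventPair relate:

// The service keeps the writable half to signal; the readable half is what a
// guest would wait on after it is inserted into a handle table.
void SignalCompletion(Kernel::KernelCore& kernel) {
    auto pair = Kernel::WritableEvent::CreateEventPair(kernel, "Example:Completion");

    pair.writable->Signal();             // wakes waiters on the readable half
    ASSERT(pair.readable->IsSignaled()); // both halves observe the same state
    pair.writable->Clear();
}
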
9
src/core/hle/lock.cpp
Executable file
@@ -0,0 +1,9 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <core/hle/lock.h>

namespace HLE {
std::recursive_mutex g_hle_lock;
}
18
src/core/hle/lock.h
Executable file
@@ -0,0 +1,18 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <mutex>

namespace HLE {
/*
 * Synchronizes access to the internal HLE kernel structures. It is acquired when a guest
 * application thread performs a syscall, and should be acquired by any host threads that read or
 * modify the HLE kernel state. Note: Any operation that directly or indirectly reads from or writes
 * to the emulated memory is not protected by this mutex, and should be avoided in any threads other
 * than the CPU thread.
 */
extern std::recursive_mutex g_hle_lock;
} // namespace HLE
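
Since g_hle_lock is a plain std::recursive_mutex, host code can guard it with the standard RAII wrappers; a minimal sketch (MutateKernelStateSafely is a hypothetical stand-in for any such access):

#include <mutex>

#include "core/hle/lock.h"

// Touch HLE kernel state from a host thread under the global lock.
void MutateKernelStateSafely() {
    std::lock_guard lock{HLE::g_hle_lock}; // recursive, so nested acquisition is safe
    // ... read or modify HLE kernel structures here ...
}
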
360
src/core/hle/result.h
Executable file
@@ -0,0 +1,360 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <new>
#include <utility>
#include "common/assert.h"
#include "common/bit_field.h"
#include "common/common_types.h"

// All the constants in this file come from http://switchbrew.org/index.php?title=Error_codes

/**
 * Identifies the module which caused the error. Error codes can be propagated through a call
 * chain, meaning that this doesn't always correspond to the module where the API call made is
 * contained.
 */
enum class ErrorModule : u32 {
    Common = 0,
    Kernel = 1,
    FS = 2,
    OS = 3, // used for Memory, Thread, Mutex, Nvidia
    HTCS = 4,
    NCM = 5,
    DD = 6,
    LR = 8,
    Loader = 9,
    CMIF = 10,
    HIPC = 11,
    PM = 15,
    NS = 16,
    HTC = 18,
    NCMContent = 20,
    SM = 21,
    RO = 22,
    SDMMC = 24,
    OVLN = 25,
    SPL = 26,
    ETHC = 100,
    I2C = 101,
    GPIO = 102,
    UART = 103,
    Settings = 105,
    WLAN = 107,
    XCD = 108,
    NIFM = 110,
    Hwopus = 111,
    Bluetooth = 113,
    VI = 114,
    NFP = 115,
    Time = 116,
    FGM = 117,
    OE = 118,
    PCIe = 120,
    Friends = 121,
    BCAT = 122,
    SSL = 123,
    Account = 124,
    News = 125,
    Mii = 126,
    NFC = 127,
    AM = 128,
    PlayReport = 129,
    AHID = 130,
    Qlaunch = 132,
    PCV = 133,
    OMM = 134,
    BPC = 135,
    PSM = 136,
    NIM = 137,
    PSC = 138,
    TC = 139,
    USB = 140,
    NSD = 141,
    PCTL = 142,
    BTM = 143,
    ETicket = 145,
    NGC = 146,
    ERPT = 147,
    APM = 148,
    Profiler = 150,
    ErrorUpload = 151,
    Audio = 153,
    NPNS = 154,
    NPNSHTTPSTREAM = 155,
    ARP = 157,
    SWKBD = 158,
    BOOT = 159,
    NFCMifare = 161,
    UserlandAssert = 162,
    Fatal = 163,
    NIMShop = 164,
    SPSM = 165,
    BGTC = 167,
    UserlandCrash = 168,
    SREPO = 180,
    Dauth = 181,
    HID = 202,
    LDN = 203,
    Irsensor = 205,
    Capture = 206,
    Manu = 208,
    ATK = 209,
    GRC = 212,
    Migration = 216,
    MigrationLdcServ = 217,
    GeneralWebApplet = 800,
    WifiWebAuthApplet = 809,
    WhitelistedApplet = 810,
    ShopN = 811,
};

/// Encapsulates a Horizon OS error code, allowing it to be separated into its constituent fields.
union ResultCode {
    u32 raw;

    BitField<0, 9, ErrorModule> module;
    BitField<9, 13, u32> description;

    constexpr explicit ResultCode(u32 raw_) : raw(raw_) {}

    constexpr ResultCode(ErrorModule module_, u32 description_)
        : raw(module.FormatValue(module_) | description.FormatValue(description_)) {}

    constexpr bool IsSuccess() const {
        return raw == 0;
    }

    constexpr bool IsError() const {
        return raw != 0;
    }
};

constexpr bool operator==(const ResultCode& a, const ResultCode& b) {
    return a.raw == b.raw;
}

constexpr bool operator!=(const ResultCode& a, const ResultCode& b) {
    return a.raw != b.raw;
}

// Convenience functions for creating some common kinds of errors:

/// The default success `ResultCode`.
constexpr ResultCode RESULT_SUCCESS(0);

/**
 * Placeholder result code used for unknown error codes.
 *
 * @note This should only be used when a particular error code
 * is not known yet.
 */
constexpr ResultCode RESULT_UNKNOWN(UINT32_MAX);

/**
 * This is an optional value type. It holds a `ResultCode` and, if that code is a success code,
 * also holds a result of type `T`. If the code is an error code then trying to access the inner
 * value fails, thus ensuring that the ResultCode of functions is always checked properly before
 * their return value is used. It is similar in concept to the `std::optional` type
 * (http://en.cppreference.com/w/cpp/experimental/optional) originally proposed for inclusion in
 * C++14, or the `Result` type in Rust (http://doc.rust-lang.org/std/result/index.html).
 *
 * An example of how it could be used:
 * \code
 * ResultVal<int> Frobnicate(float strength) {
 *     if (strength < 0.f || strength > 1.0f) {
 *         // Can't frobnicate too weakly or too strongly
 *         return ResultCode(ErrorDescription::OutOfRange, ErrorModule::Common,
 *                           ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
 *     } else {
 *         // Frobnicated! Give caller a cookie
 *         return MakeResult<int>(42);
 *     }
 * }
 * \endcode
 *
 * \code
 * ResultVal<int> frob_result = Frobnicate(0.75f);
 * if (frob_result.Succeeded()) {
 *     // Frobbed ok
 *     printf("My cookie is %d\n", *frob_result);
 * } else {
 *     printf("Guess I overdid it. :( Error code: %x\n", frob_result.Code().raw);
 * }
 * \endcode
 */
template <typename T>
class ResultVal {
public:
    /// Constructs an empty `ResultVal` with the given error code. The code must not be a success
    /// code.
    ResultVal(ResultCode error_code = RESULT_UNKNOWN) : result_code(error_code) {
        ASSERT(error_code.IsError());
    }

    /**
     * Similar to the non-member function `MakeResult`, with the exception that you can manually
     * specify the success code. `success_code` must not be an error code.
     */
    template <typename... Args>
    static ResultVal WithCode(ResultCode success_code, Args&&... args) {
        ResultVal<T> result;
        result.emplace(success_code, std::forward<Args>(args)...);
        return result;
    }

    ResultVal(const ResultVal& o) : result_code(o.result_code) {
        if (!o.empty()) {
            new (&object) T(o.object);
        }
    }

    ResultVal(ResultVal&& o) noexcept : result_code(o.result_code) {
        if (!o.empty()) {
            new (&object) T(std::move(o.object));
        }
    }

    ~ResultVal() {
        if (!empty()) {
            object.~T();
        }
    }

    ResultVal& operator=(const ResultVal& o) {
        if (this == &o) {
            return *this;
        }
        if (!empty()) {
            if (!o.empty()) {
                object = o.object;
            } else {
                object.~T();
            }
        } else {
            if (!o.empty()) {
                new (&object) T(o.object);
            }
        }
        result_code = o.result_code;

        return *this;
    }

    /**
     * Replaces the current result with a new constructed result value in-place. The code must not
     * be an error code.
     */
    template <typename... Args>
    void emplace(ResultCode success_code, Args&&... args) {
        ASSERT(success_code.IsSuccess());
        if (!empty()) {
            object.~T();
        }
        new (&object) T(std::forward<Args>(args)...);
        result_code = success_code;
    }

    /// Returns true if the `ResultVal` contains an error code and no value.
    bool empty() const {
        return result_code.IsError();
    }

    /// Returns true if the `ResultVal` contains a return value.
    bool Succeeded() const {
        return result_code.IsSuccess();
    }
    /// Returns true if the `ResultVal` contains an error code and no value.
    bool Failed() const {
        return empty();
    }

    ResultCode Code() const {
        return result_code;
    }

    const T& operator*() const {
        return object;
    }
    T& operator*() {
        return object;
    }
    const T* operator->() const {
        return &object;
    }
    T* operator->() {
        return &object;
    }

    /// Returns the value contained in this `ResultVal`, or the supplied default if it is missing.
    template <typename U>
    T ValueOr(U&& value) const {
        return !empty() ? object : std::move(value);
    }

    /// Asserts that the result succeeded and returns a reference to it.
    T& Unwrap() & {
        ASSERT_MSG(Succeeded(), "Tried to Unwrap empty ResultVal");
        return **this;
    }

    T&& Unwrap() && {
        ASSERT_MSG(Succeeded(), "Tried to Unwrap empty ResultVal");
        return std::move(**this);
    }

private:
    // A union is used to allocate the storage for the value, while allowing us to construct and
    // destruct it at will.
    union {
        T object;
    };
    ResultCode result_code;
};

/**
 * This function is a helper used to construct `ResultVal`s. It receives the arguments to construct
 * `T` with and creates a success `ResultVal` containing the constructed value.
 */
template <typename T, typename... Args>
ResultVal<T> MakeResult(Args&&... args) {
    return ResultVal<T>::WithCode(RESULT_SUCCESS, std::forward<Args>(args)...);
}

/**
 * Deducible overload of MakeResult, allowing the template parameter to be omitted if you're just
 * copy or move constructing.
 */
template <typename Arg>
ResultVal<std::remove_reference_t<Arg>> MakeResult(Arg&& arg) {
    return ResultVal<std::remove_reference_t<Arg>>::WithCode(RESULT_SUCCESS,
                                                             std::forward<Arg>(arg));
}

/**
 * Check for the success of `source` (which must evaluate to a ResultVal). If it succeeds, unwraps
 * the contained value and assigns it to `target`, which can be either an l-value expression or a
 * variable declaration. If it fails the return code is returned from the current function. Thus it
 * can be used to cascade errors out, achieving something akin to exception handling.
 */
#define CASCADE_RESULT(target, source)                                                             \
    auto CONCAT2(check_result_L, __LINE__) = source;                                               \
    if (CONCAT2(check_result_L, __LINE__).Failed()) {                                              \
        return CONCAT2(check_result_L, __LINE__).Code();                                           \
    }                                                                                              \
    target = std::move(*CONCAT2(check_result_L, __LINE__))

/**
 * Analogous to CASCADE_RESULT, but for a bare ResultCode. The code will be propagated if
 * non-success, or discarded otherwise.
 */
#define CASCADE_CODE(source)                                                                       \
    do {                                                                                           \
        auto CONCAT2(check_result_L, __LINE__) = source;                                           \
        if (CONCAT2(check_result_L, __LINE__).IsError()) {                                         \
            return CONCAT2(check_result_L, __LINE__);                                              \
        }                                                                                          \
    } while (false)
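
A short usage sketch of the two macros; ParseHeader and ValidateHeader are hypothetical functions used only to show the control flow:

// Cascade failures out of a function without an explicit if/return at each step.
ResultCode LoadSomething() {
    // CASCADE_RESULT: declare 'header' from a ResultVal, or return its error code.
    CASCADE_RESULT(auto header, ParseHeader());

    // CASCADE_CODE: propagate a bare ResultCode if it is an error, else continue.
    CASCADE_CODE(ValidateHeader(header));

    return RESULT_SUCCESS;
}
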
860
src/core/hle/service/acc/acc.cpp
Executable file
@@ -0,0 +1,860 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <array>
#include "common/common_paths.h"
#include "common/common_types.h"
#include "common/file_util.h"
#include "common/logging/log.h"
#include "common/string_util.h"
#include "common/swap.h"
#include "core/constants.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/file_sys/control_metadata.h"
#include "core/file_sys/patch_manager.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/service/acc/acc.h"
#include "core/hle/service/acc/acc_aa.h"
#include "core/hle/service/acc/acc_su.h"
#include "core/hle/service/acc/acc_u0.h"
#include "core/hle/service/acc/acc_u1.h"
#include "core/hle/service/acc/errors.h"
#include "core/hle/service/acc/profile_manager.h"
#include "core/hle/service/glue/arp.h"
#include "core/hle/service/glue/manager.h"
#include "core/hle/service/sm/sm.h"
#include "core/loader/loader.h"

namespace Service::Account {

constexpr ResultCode ERR_INVALID_BUFFER_SIZE{ErrorModule::Account, 30};
constexpr ResultCode ERR_FAILED_SAVE_DATA{ErrorModule::Account, 100};

static std::string GetImagePath(Common::UUID uuid) {
    return Common::FS::GetUserPath(Common::FS::UserPath::NANDDir) +
           "/system/save/8000000000000010/su/avators/" + uuid.FormatSwitch() + ".jpg";
}

static constexpr u32 SanitizeJPEGSize(std::size_t size) {
    constexpr std::size_t max_jpeg_image_size = 0x20000;
    return static_cast<u32>(std::min(size, max_jpeg_image_size));
}
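
// Illustration only (not part of the original file): SanitizeJPEGSize caps the
// byte count handed back over IPC at 0x20000 (128 KiB), so an oversized avatar
// file on disk can never overflow the guest-visible size.
static_assert(SanitizeJPEGSize(1024) == 1024);       // small sizes pass through
static_assert(SanitizeJPEGSize(0x30000) == 0x20000); // larger sizes are clamped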
|
||||
|
||||
class IManagerForSystemService final : public ServiceFramework<IManagerForSystemService> {
|
||||
public:
|
||||
explicit IManagerForSystemService(Core::System& system_, Common::UUID)
|
||||
: ServiceFramework{system_, "IManagerForSystemService"} {
|
||||
// clang-format off
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, nullptr, "CheckAvailability"},
|
||||
{1, nullptr, "GetAccountId"},
|
||||
{2, nullptr, "EnsureIdTokenCacheAsync"},
|
||||
{3, nullptr, "LoadIdTokenCache"},
|
||||
{100, nullptr, "SetSystemProgramIdentification"},
|
||||
{101, nullptr, "RefreshNotificationTokenAsync"}, // 7.0.0+
|
||||
{110, nullptr, "GetServiceEntryRequirementCache"}, // 4.0.0+
|
||||
{111, nullptr, "InvalidateServiceEntryRequirementCache"}, // 4.0.0+
|
||||
{112, nullptr, "InvalidateTokenCache"}, // 4.0.0 - 6.2.0
|
||||
{113, nullptr, "GetServiceEntryRequirementCacheForOnlinePlay"}, // 6.1.0+
|
||||
{120, nullptr, "GetNintendoAccountId"},
|
||||
{121, nullptr, "CalculateNintendoAccountAuthenticationFingerprint"}, // 9.0.0+
|
||||
{130, nullptr, "GetNintendoAccountUserResourceCache"},
|
||||
{131, nullptr, "RefreshNintendoAccountUserResourceCacheAsync"},
|
||||
{132, nullptr, "RefreshNintendoAccountUserResourceCacheAsyncIfSecondsElapsed"},
|
||||
{133, nullptr, "GetNintendoAccountVerificationUrlCache"}, // 9.0.0+
|
||||
{134, nullptr, "RefreshNintendoAccountVerificationUrlCache"}, // 9.0.0+
|
||||
{135, nullptr, "RefreshNintendoAccountVerificationUrlCacheAsyncIfSecondsElapsed"}, // 9.0.0+
|
||||
{140, nullptr, "GetNetworkServiceLicenseCache"}, // 5.0.0+
|
||||
{141, nullptr, "RefreshNetworkServiceLicenseCacheAsync"}, // 5.0.0+
|
||||
{142, nullptr, "RefreshNetworkServiceLicenseCacheAsyncIfSecondsElapsed"}, // 5.0.0+
|
||||
{150, nullptr, "CreateAuthorizationRequest"},
|
||||
};
|
||||
// clang-format on
|
||||
|
||||
RegisterHandlers(functions);
|
||||
}
|
||||
};
|
||||
|
||||
// 3.0.0+
|
||||
class IFloatingRegistrationRequest final : public ServiceFramework<IFloatingRegistrationRequest> {
|
||||
public:
|
||||
explicit IFloatingRegistrationRequest(Core::System& system_, Common::UUID)
|
||||
: ServiceFramework{system_, "IFloatingRegistrationRequest"} {
|
||||
// clang-format off
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, nullptr, "GetSessionId"},
|
||||
{12, nullptr, "GetAccountId"},
|
||||
{13, nullptr, "GetLinkedNintendoAccountId"},
|
||||
{14, nullptr, "GetNickname"},
|
||||
{15, nullptr, "GetProfileImage"},
|
||||
{21, nullptr, "LoadIdTokenCache"},
|
||||
{100, nullptr, "RegisterUser"}, // [1.0.0-3.0.2] RegisterAsync
|
||||
{101, nullptr, "RegisterUserWithUid"}, // [1.0.0-3.0.2] RegisterWithUidAsync
|
||||
{102, nullptr, "RegisterNetworkServiceAccountAsync"}, // 4.0.0+
|
||||
{103, nullptr, "RegisterNetworkServiceAccountWithUidAsync"}, // 4.0.0+
|
||||
{110, nullptr, "SetSystemProgramIdentification"},
|
||||
{111, nullptr, "EnsureIdTokenCacheAsync"},
|
||||
};
|
||||
// clang-format on
|
||||
|
||||
RegisterHandlers(functions);
|
||||
}
|
||||
};
|
||||
|
||||
class IAdministrator final : public ServiceFramework<IAdministrator> {
|
||||
public:
|
||||
explicit IAdministrator(Core::System& system_, Common::UUID)
|
||||
: ServiceFramework{system_, "IAdministrator"} {
|
||||
// clang-format off
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, nullptr, "CheckAvailability"},
|
||||
{1, nullptr, "GetAccountId"},
|
||||
{2, nullptr, "EnsureIdTokenCacheAsync"},
|
||||
{3, nullptr, "LoadIdTokenCache"},
|
||||
{100, nullptr, "SetSystemProgramIdentification"},
|
||||
{101, nullptr, "RefreshNotificationTokenAsync"}, // 7.0.0+
|
||||
{110, nullptr, "GetServiceEntryRequirementCache"}, // 4.0.0+
|
||||
{111, nullptr, "InvalidateServiceEntryRequirementCache"}, // 4.0.0+
|
||||
{112, nullptr, "InvalidateTokenCache"}, // 4.0.0 - 6.2.0
|
||||
{113, nullptr, "GetServiceEntryRequirementCacheForOnlinePlay"}, // 6.1.0+
|
||||
{120, nullptr, "GetNintendoAccountId"},
|
||||
{121, nullptr, "CalculateNintendoAccountAuthenticationFingerprint"}, // 9.0.0+
|
||||
{130, nullptr, "GetNintendoAccountUserResourceCache"},
|
||||
{131, nullptr, "RefreshNintendoAccountUserResourceCacheAsync"},
|
||||
{132, nullptr, "RefreshNintendoAccountUserResourceCacheAsyncIfSecondsElapsed"},
|
||||
{133, nullptr, "GetNintendoAccountVerificationUrlCache"}, // 9.0.0+
|
||||
{134, nullptr, "RefreshNintendoAccountVerificationUrlCacheAsync"}, // 9.0.0+
|
||||
{135, nullptr, "RefreshNintendoAccountVerificationUrlCacheAsyncIfSecondsElapsed"}, // 9.0.0+
|
||||
{140, nullptr, "GetNetworkServiceLicenseCache"}, // 5.0.0+
|
||||
{141, nullptr, "RefreshNetworkServiceLicenseCacheAsync"}, // 5.0.0+
|
||||
{142, nullptr, "RefreshNetworkServiceLicenseCacheAsyncIfSecondsElapsed"}, // 5.0.0+
|
||||
{150, nullptr, "CreateAuthorizationRequest"},
|
||||
{200, nullptr, "IsRegistered"},
|
||||
{201, nullptr, "RegisterAsync"},
|
||||
{202, nullptr, "UnregisterAsync"},
|
||||
{203, nullptr, "DeleteRegistrationInfoLocally"},
|
||||
{220, nullptr, "SynchronizeProfileAsync"},
|
||||
{221, nullptr, "UploadProfileAsync"},
|
||||
{222, nullptr, "SynchronizaProfileAsyncIfSecondsElapsed"},
|
||||
{250, nullptr, "IsLinkedWithNintendoAccount"},
|
||||
{251, nullptr, "CreateProcedureToLinkWithNintendoAccount"},
|
||||
{252, nullptr, "ResumeProcedureToLinkWithNintendoAccount"},
|
||||
{255, nullptr, "CreateProcedureToUpdateLinkageStateOfNintendoAccount"},
|
||||
{256, nullptr, "ResumeProcedureToUpdateLinkageStateOfNintendoAccount"},
|
||||
{260, nullptr, "CreateProcedureToLinkNnidWithNintendoAccount"}, // 3.0.0+
|
||||
{261, nullptr, "ResumeProcedureToLinkNnidWithNintendoAccount"}, // 3.0.0+
|
||||
{280, nullptr, "ProxyProcedureToAcquireApplicationAuthorizationForNintendoAccount"},
|
||||
{290, nullptr, "GetRequestForNintendoAccountUserResourceView"}, // 8.0.0+
|
||||
{300, nullptr, "TryRecoverNintendoAccountUserStateAsync"}, // 6.0.0+
|
||||
{400, nullptr, "IsServiceEntryRequirementCacheRefreshRequiredForOnlinePlay"}, // 6.1.0+
|
||||
{401, nullptr, "RefreshServiceEntryRequirementCacheForOnlinePlayAsync"}, // 6.1.0+
|
||||
{900, nullptr, "GetAuthenticationInfoForWin"}, // 9.0.0+
|
||||
{901, nullptr, "ImportAsyncForWin"}, // 9.0.0+
|
||||
{997, nullptr, "DebugUnlinkNintendoAccountAsync"},
|
||||
{998, nullptr, "DebugSetAvailabilityErrorDetail"},
|
||||
};
|
||||
// clang-format on
|
||||
|
||||
RegisterHandlers(functions);
|
||||
}
|
||||
};
|
||||
|
||||
class IAuthorizationRequest final : public ServiceFramework<IAuthorizationRequest> {
|
||||
public:
|
||||
explicit IAuthorizationRequest(Core::System& system_, Common::UUID)
|
||||
: ServiceFramework{system_, "IAuthorizationRequest"} {
|
||||
// clang-format off
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, nullptr, "GetSessionId"},
|
||||
{10, nullptr, "InvokeWithoutInteractionAsync"},
|
||||
{19, nullptr, "IsAuthorized"},
|
||||
{20, nullptr, "GetAuthorizationCode"},
|
||||
{21, nullptr, "GetIdToken"},
|
||||
{22, nullptr, "GetState"},
|
||||
};
|
||||
// clang-format on
|
||||
|
||||
RegisterHandlers(functions);
|
||||
}
|
||||
};
|
||||
|
||||
class IOAuthProcedure final : public ServiceFramework<IOAuthProcedure> {
|
||||
public:
|
||||
explicit IOAuthProcedure(Core::System& system_, Common::UUID)
|
||||
: ServiceFramework{system_, "IOAuthProcedure"} {
|
||||
// clang-format off
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, nullptr, "PrepareAsync"},
|
||||
{1, nullptr, "GetRequest"},
|
||||
{2, nullptr, "ApplyResponse"},
|
||||
{3, nullptr, "ApplyResponseAsync"},
|
||||
{10, nullptr, "Suspend"},
|
||||
};
|
||||
// clang-format on
|
||||
|
||||
RegisterHandlers(functions);
|
||||
}
|
||||
};
|
||||
|
||||
// 3.0.0+
|
||||
class IOAuthProcedureForExternalNsa final : public ServiceFramework<IOAuthProcedureForExternalNsa> {
|
||||
public:
|
||||
explicit IOAuthProcedureForExternalNsa(Core::System& system_, Common::UUID)
|
||||
: ServiceFramework{system_, "IOAuthProcedureForExternalNsa"} {
|
||||
// clang-format off
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, nullptr, "PrepareAsync"},
|
||||
{1, nullptr, "GetRequest"},
|
||||
{2, nullptr, "ApplyResponse"},
|
||||
{3, nullptr, "ApplyResponseAsync"},
|
||||
{10, nullptr, "Suspend"},
|
||||
{100, nullptr, "GetAccountId"},
|
||||
{101, nullptr, "GetLinkedNintendoAccountId"},
|
||||
{102, nullptr, "GetNickname"},
|
||||
{103, nullptr, "GetProfileImage"},
|
||||
};
|
||||
// clang-format on
|
||||
|
||||
RegisterHandlers(functions);
|
||||
}
|
||||
};
|
||||
|
||||
class IOAuthProcedureForNintendoAccountLinkage final
|
||||
: public ServiceFramework<IOAuthProcedureForNintendoAccountLinkage> {
|
||||
public:
|
||||
explicit IOAuthProcedureForNintendoAccountLinkage(Core::System& system_, Common::UUID)
|
||||
: ServiceFramework{system_, "IOAuthProcedureForNintendoAccountLinkage"} {
|
||||
// clang-format off
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, nullptr, "PrepareAsync"},
|
||||
{1, nullptr, "GetRequest"},
|
||||
{2, nullptr, "ApplyResponse"},
|
||||
{3, nullptr, "ApplyResponseAsync"},
|
||||
{10, nullptr, "Suspend"},
|
||||
{100, nullptr, "GetRequestWithTheme"},
|
||||
{101, nullptr, "IsNetworkServiceAccountReplaced"},
|
||||
{199, nullptr, "GetUrlForIntroductionOfExtraMembership"}, // 2.0.0 - 5.1.0
|
||||
};
|
||||
// clang-format on
|
||||
|
||||
RegisterHandlers(functions);
|
||||
}
|
||||
};
|
||||
|
||||
class INotifier final : public ServiceFramework<INotifier> {
|
||||
public:
|
||||
explicit INotifier(Core::System& system_, Common::UUID)
|
||||
: ServiceFramework{system_, "INotifier"} {
|
||||
// clang-format off
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, nullptr, "GetSystemEvent"},
|
||||
};
|
||||
// clang-format on
|
||||
|
||||
RegisterHandlers(functions);
|
||||
}
|
||||
};
|
||||
|
||||
class IProfileCommon : public ServiceFramework<IProfileCommon> {
|
||||
public:
|
||||
explicit IProfileCommon(Core::System& system_, const char* name, bool editor_commands,
|
||||
Common::UUID user_id_, ProfileManager& profile_manager_)
|
||||
: ServiceFramework{system_, name}, profile_manager{profile_manager_}, user_id{user_id_} {
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, &IProfileCommon::Get, "Get"},
|
||||
{1, &IProfileCommon::GetBase, "GetBase"},
|
||||
{10, &IProfileCommon::GetImageSize, "GetImageSize"},
|
||||
{11, &IProfileCommon::LoadImage, "LoadImage"},
|
||||
};
|
||||
|
||||
RegisterHandlers(functions);
|
||||
|
||||
if (editor_commands) {
|
||||
static const FunctionInfo editor_functions[] = {
|
||||
{100, &IProfileCommon::Store, "Store"},
|
||||
{101, &IProfileCommon::StoreWithImage, "StoreWithImage"},
|
||||
};
|
||||
|
||||
RegisterHandlers(editor_functions);
|
||||
}
|
||||
}
|
||||
|
||||
protected:
|
||||
void Get(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_ACC, "called user_id={}", user_id.Format());
|
||||
ProfileBase profile_base{};
|
||||
ProfileData data{};
|
||||
if (profile_manager.GetProfileBaseAndData(user_id, profile_base, data)) {
|
||||
ctx.WriteBuffer(data);
|
||||
IPC::ResponseBuilder rb{ctx, 16};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
rb.PushRaw(profile_base);
|
||||
} else {
|
||||
LOG_ERROR(Service_ACC, "Failed to get profile base and data for user={}",
|
||||
user_id.Format());
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_UNKNOWN); // TODO(ogniK): Get actual error code
|
||||
}
|
||||
}
|
||||
|
||||
void GetBase(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_ACC, "called user_id={}", user_id.Format());
|
||||
ProfileBase profile_base{};
|
||||
if (profile_manager.GetProfileBase(user_id, profile_base)) {
|
||||
IPC::ResponseBuilder rb{ctx, 16};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
rb.PushRaw(profile_base);
|
||||
} else {
|
||||
LOG_ERROR(Service_ACC, "Failed to get profile base for user={}", user_id.Format());
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_UNKNOWN); // TODO(ogniK): Get actual error code
|
||||
}
|
||||
}
|
||||
|
||||
void LoadImage(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_ACC, "called");
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 3};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
|
||||
const Common::FS::IOFile image(GetImagePath(user_id), "rb");
|
||||
if (!image.IsOpen()) {
|
||||
LOG_WARNING(Service_ACC,
|
||||
"Failed to load user provided image! Falling back to built-in backup...");
|
||||
ctx.WriteBuffer(Core::Constants::ACCOUNT_BACKUP_JPEG);
|
||||
rb.Push(SanitizeJPEGSize(Core::Constants::ACCOUNT_BACKUP_JPEG.size()));
|
||||
return;
|
||||
}
|
||||
|
||||
const u32 size = SanitizeJPEGSize(image.GetSize());
|
||||
std::vector<u8> buffer(size);
|
||||
image.ReadBytes(buffer.data(), buffer.size());
|
||||
|
||||
ctx.WriteBuffer(buffer);
|
||||
rb.Push<u32>(size);
|
||||
}
|
||||
|
||||
void GetImageSize(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_ACC, "called");
|
||||
IPC::ResponseBuilder rb{ctx, 3};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
|
||||
const Common::FS::IOFile image(GetImagePath(user_id), "rb");
|
||||
|
||||
if (!image.IsOpen()) {
|
||||
LOG_WARNING(Service_ACC,
|
||||
"Failed to load user provided image! Falling back to built-in backup...");
|
||||
rb.Push(SanitizeJPEGSize(Core::Constants::ACCOUNT_BACKUP_JPEG.size()));
|
||||
} else {
|
||||
rb.Push(SanitizeJPEGSize(image.GetSize()));
|
||||
}
|
||||
}
|
||||
|
||||
void Store(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto base = rp.PopRaw<ProfileBase>();
|
||||
|
||||
const auto user_data = ctx.ReadBuffer();
|
||||
|
||||
LOG_DEBUG(Service_ACC, "called, username='{}', timestamp={:016X}, uuid={}",
|
||||
Common::StringFromFixedZeroTerminatedBuffer(
|
||||
reinterpret_cast<const char*>(base.username.data()), base.username.size()),
|
||||
base.timestamp, base.user_uuid.Format());
|
||||
|
||||
if (user_data.size() < sizeof(ProfileData)) {
|
||||
LOG_ERROR(Service_ACC, "ProfileData buffer too small!");
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ERR_INVALID_BUFFER_SIZE);
|
||||
return;
|
||||
}
|
||||
|
||||
ProfileData data;
|
||||
std::memcpy(&data, user_data.data(), sizeof(ProfileData));
|
||||
|
||||
if (!profile_manager.SetProfileBaseAndData(user_id, base, data)) {
|
||||
LOG_ERROR(Service_ACC, "Failed to update profile data and base!");
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ERR_FAILED_SAVE_DATA);
|
||||
return;
|
||||
}
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
    void StoreWithImage(Kernel::HLERequestContext& ctx) {
        IPC::RequestParser rp{ctx};
        const auto base = rp.PopRaw<ProfileBase>();

        const auto user_data = ctx.ReadBuffer();
        const auto image_data = ctx.ReadBuffer(1);

        LOG_DEBUG(Service_ACC, "called, username='{}', timestamp={:016X}, uuid={}",
                  Common::StringFromFixedZeroTerminatedBuffer(
                      reinterpret_cast<const char*>(base.username.data()), base.username.size()),
                  base.timestamp, base.user_uuid.Format());

        if (user_data.size() < sizeof(ProfileData)) {
            LOG_ERROR(Service_ACC, "ProfileData buffer too small!");
            IPC::ResponseBuilder rb{ctx, 2};
            rb.Push(ERR_INVALID_BUFFER_SIZE);
            return;
        }

        ProfileData data;
        std::memcpy(&data, user_data.data(), sizeof(ProfileData));

        Common::FS::IOFile image(GetImagePath(user_id), "wb");

        if (!image.IsOpen() || !image.Resize(image_data.size()) ||
            image.WriteBytes(image_data.data(), image_data.size()) != image_data.size() ||
            !profile_manager.SetProfileBaseAndData(user_id, base, data)) {
            LOG_ERROR(Service_ACC, "Failed to update profile data, base, and image!");
            IPC::ResponseBuilder rb{ctx, 2};
            rb.Push(ERR_FAILED_SAVE_DATA);
            return;
        }

        IPC::ResponseBuilder rb{ctx, 2};
        rb.Push(RESULT_SUCCESS);
    }

    ProfileManager& profile_manager;
    Common::UUID user_id{Common::INVALID_UUID}; ///< The user id this profile refers to.
};

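// IProfile and IProfileEditor are thin wrappers over IProfileCommon; the third
// constructor argument appears to be an editor flag that gates the mutating
// commands (Store/StoreWithImage) behind the profile editor interface.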
class IProfile final : public IProfileCommon {
public:
    explicit IProfile(Core::System& system_, Common::UUID user_id_,
                      ProfileManager& profile_manager_)
        : IProfileCommon{system_, "IProfile", false, user_id_, profile_manager_} {}
};

class IProfileEditor final : public IProfileCommon {
public:
    explicit IProfileEditor(Core::System& system_, Common::UUID user_id_,
                            ProfileManager& profile_manager_)
        : IProfileCommon{system_, "IProfileEditor", true, user_id_, profile_manager_} {}
};

class IAsyncContext final : public ServiceFramework<IAsyncContext> {
public:
    explicit IAsyncContext(Core::System& system_) : ServiceFramework{system_, "IAsyncContext"} {
        // clang-format off
        static const FunctionInfo functions[] = {
            {0, nullptr, "GetSystemEvent"},
            {1, nullptr, "Cancel"},
            {2, nullptr, "HasDone"},
            {3, nullptr, "GetResult"},
        };
        // clang-format on

        RegisterHandlers(functions);
    }
};

class ISessionObject final : public ServiceFramework<ISessionObject> {
public:
    explicit ISessionObject(Core::System& system_, Common::UUID)
        : ServiceFramework{system_, "ISessionObject"} {
        // clang-format off
        static const FunctionInfo functions[] = {
            {999, nullptr, "Dummy"},
        };
        // clang-format on

        RegisterHandlers(functions);
    }
};

class IGuestLoginRequest final : public ServiceFramework<IGuestLoginRequest> {
public:
    explicit IGuestLoginRequest(Core::System& system_, Common::UUID)
        : ServiceFramework{system_, "IGuestLoginRequest"} {
        // clang-format off
        static const FunctionInfo functions[] = {
            {0, nullptr, "GetSessionId"},
            {11, nullptr, "Unknown"}, // 1.0.0 - 2.3.0 (the name is blank on Switchbrew)
            {12, nullptr, "GetAccountId"},
            {13, nullptr, "GetLinkedNintendoAccountId"},
            {14, nullptr, "GetNickname"},
            {15, nullptr, "GetProfileImage"},
            {21, nullptr, "LoadIdTokenCache"}, // 3.0.0+
        };
        // clang-format on

        RegisterHandlers(functions);
    }
};

class IManagerForApplication final : public ServiceFramework<IManagerForApplication> {
public:
    explicit IManagerForApplication(Core::System& system_, Common::UUID user_id_)
        : ServiceFramework{system_, "IManagerForApplication"}, user_id{user_id_} {
        // clang-format off
        static const FunctionInfo functions[] = {
            {0, &IManagerForApplication::CheckAvailability, "CheckAvailability"},
            {1, &IManagerForApplication::GetAccountId, "GetAccountId"},
            {2, nullptr, "EnsureIdTokenCacheAsync"},
            {3, nullptr, "LoadIdTokenCache"},
            {130, nullptr, "GetNintendoAccountUserResourceCacheForApplication"},
            {150, nullptr, "CreateAuthorizationRequest"},
            {160, &IManagerForApplication::StoreOpenContext, "StoreOpenContext"},
            {170, nullptr, "LoadNetworkServiceLicenseKindAsync"},
        };
        // clang-format on

        RegisterHandlers(functions);
    }

private:
    void CheckAvailability(Kernel::HLERequestContext& ctx) {
        LOG_WARNING(Service_ACC, "(STUBBED) called");
        IPC::ResponseBuilder rb{ctx, 3};
        rb.Push(RESULT_SUCCESS);
        rb.Push(false); // TODO: Check when this is supposed to return true and when not
    }

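    // GetNintendoID() presumably returns the low 64 bits of the user's UUID,
    // standing in for a real Nintendo Account ID in emulation.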
    void GetAccountId(Kernel::HLERequestContext& ctx) {
        LOG_DEBUG(Service_ACC, "called");

        IPC::ResponseBuilder rb{ctx, 4};
        rb.Push(RESULT_SUCCESS);
        rb.PushRaw<u64>(user_id.GetNintendoID());
    }

    void StoreOpenContext(Kernel::HLERequestContext& ctx) {
        LOG_WARNING(Service_ACC, "(STUBBED) called");
        IPC::ResponseBuilder rb{ctx, 2};
        rb.Push(RESULT_SUCCESS);
    }

    Common::UUID user_id;
};

// 6.0.0+
class IAsyncNetworkServiceLicenseKindContext final
    : public ServiceFramework<IAsyncNetworkServiceLicenseKindContext> {
public:
    explicit IAsyncNetworkServiceLicenseKindContext(Core::System& system_, Common::UUID)
        : ServiceFramework{system_, "IAsyncNetworkServiceLicenseKindContext"} {
        // clang-format off
        static const FunctionInfo functions[] = {
            {0, nullptr, "GetSystemEvent"},
            {1, nullptr, "Cancel"},
            {2, nullptr, "HasDone"},
            {3, nullptr, "GetResult"},
            {4, nullptr, "GetNetworkServiceLicenseKind"},
        };
        // clang-format on

        RegisterHandlers(functions);
    }
};

// 8.0.0+
class IOAuthProcedureForUserRegistration final
    : public ServiceFramework<IOAuthProcedureForUserRegistration> {
public:
    explicit IOAuthProcedureForUserRegistration(Core::System& system_, Common::UUID)
        : ServiceFramework{system_, "IOAuthProcedureForUserRegistration"} {
        // clang-format off
        static const FunctionInfo functions[] = {
            {0, nullptr, "PrepareAsync"},
            {1, nullptr, "GetRequest"},
            {2, nullptr, "ApplyResponse"},
            {3, nullptr, "ApplyResponseAsync"},
            {10, nullptr, "Suspend"},
            {100, nullptr, "GetAccountId"},
            {101, nullptr, "GetLinkedNintendoAccountId"},
            {102, nullptr, "GetNickname"},
            {103, nullptr, "GetProfileImage"},
            {110, nullptr, "RegisterUserAsync"},
            {111, nullptr, "GetUid"},
        };
        // clang-format on

        RegisterHandlers(functions);
    }
};

class DAUTH_O final : public ServiceFramework<DAUTH_O> {
public:
    explicit DAUTH_O(Core::System& system_, Common::UUID) : ServiceFramework{system_, "dauth:o"} {
        // clang-format off
        static const FunctionInfo functions[] = {
            {0, nullptr, "EnsureAuthenticationTokenCacheAsync"}, // [5.0.0-5.1.0] GeneratePostData
            {1, nullptr, "LoadAuthenticationTokenCache"}, // 6.0.0+
            {2, nullptr, "InvalidateAuthenticationTokenCache"}, // 6.0.0+
            {10, nullptr, "EnsureEdgeTokenCacheAsync"}, // 6.0.0+
            {11, nullptr, "LoadEdgeTokenCache"}, // 6.0.0+
            {12, nullptr, "InvalidateEdgeTokenCache"}, // 6.0.0+
        };
        // clang-format on

        RegisterHandlers(functions);
    }
};

// 6.0.0+
class IAsyncResult final : public ServiceFramework<IAsyncResult> {
public:
    explicit IAsyncResult(Core::System& system_, Common::UUID)
        : ServiceFramework{system_, "IAsyncResult"} {
        // clang-format off
        static const FunctionInfo functions[] = {
            {0, nullptr, "GetResult"},
            {1, nullptr, "Cancel"},
            {2, nullptr, "IsAvailable"},
            {3, nullptr, "GetSystemEvent"},
        };
        // clang-format on

        RegisterHandlers(functions);
    }
};

void Module::Interface::GetUserCount(Kernel::HLERequestContext& ctx) {
    LOG_DEBUG(Service_ACC, "called");
    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(RESULT_SUCCESS);
    rb.Push<u32>(static_cast<u32>(profile_manager->GetUserCount()));
}

void Module::Interface::GetUserExistence(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};
    Common::UUID user_id = rp.PopRaw<Common::UUID>();
    LOG_DEBUG(Service_ACC, "called user_id={}", user_id.Format());

    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(RESULT_SUCCESS);
    rb.Push(profile_manager->UserExists(user_id));
}

void Module::Interface::ListAllUsers(Kernel::HLERequestContext& ctx) {
    LOG_DEBUG(Service_ACC, "called");
    ctx.WriteBuffer(profile_manager->GetAllUsers());
    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(RESULT_SUCCESS);
}

void Module::Interface::ListOpenUsers(Kernel::HLERequestContext& ctx) {
    LOG_DEBUG(Service_ACC, "called");
    ctx.WriteBuffer(profile_manager->GetOpenUsers());
    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(RESULT_SUCCESS);
}

void Module::Interface::GetLastOpenedUser(Kernel::HLERequestContext& ctx) {
    LOG_DEBUG(Service_ACC, "called");
    IPC::ResponseBuilder rb{ctx, 6};
    rb.Push(RESULT_SUCCESS);
    rb.PushRaw<Common::UUID>(profile_manager->GetLastOpenedUser());
}

void Module::Interface::GetProfile(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};
    Common::UUID user_id = rp.PopRaw<Common::UUID>();
    LOG_DEBUG(Service_ACC, "called user_id={}", user_id.Format());

    IPC::ResponseBuilder rb{ctx, 2, 0, 1};
    rb.Push(RESULT_SUCCESS);
    rb.PushIpcInterface<IProfile>(system, user_id, *profile_manager);
}

void Module::Interface::IsUserRegistrationRequestPermitted(Kernel::HLERequestContext& ctx) {
    LOG_WARNING(Service_ACC, "(STUBBED) called");
    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(RESULT_SUCCESS);
    rb.Push(profile_manager->CanSystemRegisterUser());
}

void Module::Interface::InitializeApplicationInfo(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};

    LOG_DEBUG(Service_ACC, "called");
    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(InitializeApplicationInfoBase());
}

void Module::Interface::InitializeApplicationInfoRestricted(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};

    LOG_WARNING(Service_ACC, "(Partial implementation) called");

    // TODO(ogniK): We should check whether the user actually owns the title. For
    // now, we assume they do. InitializeApplicationInfoBase should be called
    // first, followed by extra checks if the game is a digital copy.

    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(InitializeApplicationInfoBase());
}

ResultCode Module::Interface::InitializeApplicationInfoBase() {
    if (application_info) {
        LOG_ERROR(Service_ACC, "Application already initialized");
        return ERR_ACCOUNTINFO_ALREADY_INITIALIZED;
    }

    // TODO(ogniK): This should be changed to reflect the target process once we
    // emulate multiple processes. Since we don't have PID support yet, assume we
    // are always our own process.
    const auto& current_process = system.Kernel().CurrentProcess();
    const auto launch_property =
        system.GetARPManager().GetLaunchProperty(current_process->GetTitleID());

    if (launch_property.Failed()) {
        LOG_ERROR(Service_ACC, "Failed to get launch property");
        return ERR_ACCOUNTINFO_BAD_APPLICATION;
    }

    switch (launch_property->base_game_storage_id) {
    case FileSys::StorageId::GameCard:
        application_info.application_type = ApplicationType::GameCard;
        break;
    case FileSys::StorageId::Host:
    case FileSys::StorageId::NandUser:
    case FileSys::StorageId::SdCard:
    case FileSys::StorageId::None: // Yuzu specific, differs from hardware
        application_info.application_type = ApplicationType::Digital;
        break;
    default:
        LOG_ERROR(Service_ACC, "Invalid game storage ID! storage_id={}",
                  launch_property->base_game_storage_id);
        return ERR_ACCOUNTINFO_BAD_APPLICATION;
    }

    LOG_WARNING(Service_ACC, "ApplicationInfo init required");
    // TODO(ogniK): Actual initialization here

    return RESULT_SUCCESS;
}

void Module::Interface::GetBaasAccountManagerForApplication(Kernel::HLERequestContext& ctx) {
    LOG_DEBUG(Service_ACC, "called");
    IPC::ResponseBuilder rb{ctx, 2, 0, 1};
    rb.Push(RESULT_SUCCESS);
    rb.PushIpcInterface<IManagerForApplication>(system, profile_manager->GetLastOpenedUser());
}

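// IsUserAccountSwitchLocked reads the lock flag from the title's NACP control
// data. If the loader can't provide the control data directly, the PatchManager
// copy is used as a fallback before giving up.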
void Module::Interface::IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx) {
    LOG_DEBUG(Service_ACC, "called");
    FileSys::NACP nacp;
    const auto res = system.GetAppLoader().ReadControlData(nacp);

    bool is_locked = false;

    if (res != Loader::ResultStatus::Success) {
        const FileSys::PatchManager pm{system.CurrentProcess()->GetTitleID(),
                                       system.GetFileSystemController(),
                                       system.GetContentProvider()};
        const auto nacp_unique = pm.GetControlMetadata().first;

        if (nacp_unique != nullptr) {
            is_locked = nacp_unique->GetUserAccountSwitchLock();
        } else {
            LOG_ERROR(Service_ACC, "nacp_unique is null!");
        }
    } else {
        is_locked = nacp.GetUserAccountSwitchLock();
    }

    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(RESULT_SUCCESS);
    rb.Push(is_locked);
}

void Module::Interface::GetProfileEditor(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};
    Common::UUID user_id = rp.PopRaw<Common::UUID>();

    LOG_DEBUG(Service_ACC, "called, user_id={}", user_id.Format());

    IPC::ResponseBuilder rb{ctx, 2, 0, 1};
    rb.Push(RESULT_SUCCESS);
    rb.PushIpcInterface<IProfileEditor>(system, user_id, *profile_manager);
}

void Module::Interface::ListQualifiedUsers(Kernel::HLERequestContext& ctx) {
    LOG_DEBUG(Service_ACC, "called");

    // All users are treated as qualified: we don't implement parental controls or
    // Nintendo online checks yet, so we assume the user running the game has
    // access to it regardless of those settings.
    ctx.WriteBuffer(profile_manager->GetAllUsers());
    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(RESULT_SUCCESS);
}

void Module::Interface::LoadOpenContext(Kernel::HLERequestContext& ctx) {
    LOG_WARNING(Service_ACC, "(STUBBED) called");

    // This is similar to GetBaasAccountManagerForApplication
    // This command is used concurrently with ListOpenContextStoredUsers
    // TODO: Find the differences between this and GetBaasAccountManagerForApplication
    IPC::ResponseBuilder rb{ctx, 2, 0, 1};
    rb.Push(RESULT_SUCCESS);
    rb.PushIpcInterface<IManagerForApplication>(system, profile_manager->GetLastOpenedUser());
}

void Module::Interface::ListOpenContextStoredUsers(Kernel::HLERequestContext& ctx) {
    LOG_WARNING(Service_ACC, "(STUBBED) called");

    // TODO(ogniK): Handle open contexts
    ctx.WriteBuffer(profile_manager->GetOpenUsers());
    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(RESULT_SUCCESS);
}

void Module::Interface::TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx) {
    LOG_DEBUG(Service_ACC, "called");
    // A u8 is passed into this function which we can safely ignore. It appears to
    // indicate whether network access is permitted.
    IPC::ResponseBuilder rb{ctx, 6};
    if (profile_manager->GetUserCount() != 1) {
        rb.Push(RESULT_SUCCESS);
        rb.PushRaw<u128>(Common::INVALID_UUID);
        return;
    }

    const auto user_list = profile_manager->GetAllUsers();
    if (std::all_of(user_list.begin(), user_list.end(),
                    [](const auto& user) { return user.uuid == Common::INVALID_UUID; })) {
        rb.Push(RESULT_UNKNOWN); // TODO(ogniK): Find the correct error code
        rb.PushRaw<u128>(Common::INVALID_UUID);
        return;
    }

    // Select the first user we have
    rb.Push(RESULT_SUCCESS);
    rb.PushRaw<u128>(profile_manager->GetUser(0)->uuid);
}

Module::Interface::Interface(std::shared_ptr<Module> module_,
                             std::shared_ptr<ProfileManager> profile_manager_,
                             Core::System& system_, const char* name)
    : ServiceFramework{system_, name}, module{std::move(module_)},
      profile_manager{std::move(profile_manager_)} {}

Module::Interface::~Interface() = default;

void InstallInterfaces(Core::System& system) {
    auto module = std::make_shared<Module>();
    auto profile_manager = std::make_shared<ProfileManager>();

    std::make_shared<ACC_AA>(module, profile_manager, system)
        ->InstallAsService(system.ServiceManager());
    std::make_shared<ACC_SU>(module, profile_manager, system)
        ->InstallAsService(system.ServiceManager());
    std::make_shared<ACC_U0>(module, profile_manager, system)
        ->InstallAsService(system.ServiceManager());
    std::make_shared<ACC_U1>(module, profile_manager, system)
        ->InstallAsService(system.ServiceManager());
}

} // namespace Service::Account
69
src/core/hle/service/acc/acc.h
Executable file
@@ -0,0 +1,69 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/service/glue/manager.h"
#include "core/hle/service/service.h"

namespace Service::Account {

class ProfileManager;

class Module final {
public:
    class Interface : public ServiceFramework<Interface> {
    public:
        explicit Interface(std::shared_ptr<Module> module_,
                           std::shared_ptr<ProfileManager> profile_manager_, Core::System& system_,
                           const char* name);
        ~Interface() override;

        void GetUserCount(Kernel::HLERequestContext& ctx);
        void GetUserExistence(Kernel::HLERequestContext& ctx);
        void ListAllUsers(Kernel::HLERequestContext& ctx);
        void ListOpenUsers(Kernel::HLERequestContext& ctx);
        void GetLastOpenedUser(Kernel::HLERequestContext& ctx);
        void GetProfile(Kernel::HLERequestContext& ctx);
        void InitializeApplicationInfo(Kernel::HLERequestContext& ctx);
        void InitializeApplicationInfoRestricted(Kernel::HLERequestContext& ctx);
        void GetBaasAccountManagerForApplication(Kernel::HLERequestContext& ctx);
        void IsUserRegistrationRequestPermitted(Kernel::HLERequestContext& ctx);
        void TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx);
        void IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx);
        void GetProfileEditor(Kernel::HLERequestContext& ctx);
        void ListQualifiedUsers(Kernel::HLERequestContext& ctx);
        void LoadOpenContext(Kernel::HLERequestContext& ctx);
        void ListOpenContextStoredUsers(Kernel::HLERequestContext& ctx);

    private:
        ResultCode InitializeApplicationInfoBase();

        enum class ApplicationType : u32_le {
            GameCard = 0,
            Digital = 1,
            Unknown = 3,
        };

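        // The operator bool below doubles as an "initialized" flag: a zero title
        // ID in the launch property means InitializeApplicationInfoBase has not
        // completed for this process.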
        struct ApplicationInfo {
            Service::Glue::ApplicationLaunchProperty launch_property;
            ApplicationType application_type;

            constexpr explicit operator bool() const {
                return launch_property.title_id != 0x0;
            }
        };

        ApplicationInfo application_info{};

    protected:
        std::shared_ptr<Module> module;
        std::shared_ptr<ProfileManager> profile_manager;
    };
};

/// Registers all ACC services with the specified service manager.
void InstallInterfaces(Core::System& system);

} // namespace Service::Account
24
src/core/hle/service/acc/acc_aa.cpp
Executable file
@@ -0,0 +1,24 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/service/acc/acc_aa.h"

namespace Service::Account {

ACC_AA::ACC_AA(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> profile_manager,
               Core::System& system)
    : Module::Interface(std::move(module), std::move(profile_manager), system, "acc:aa") {
    static const FunctionInfo functions[] = {
        {0, nullptr, "EnsureCacheAsync"},
        {1, nullptr, "LoadCache"},
        {2, nullptr, "GetDeviceAccountId"},
        {50, nullptr, "RegisterNotificationTokenAsync"},   // 1.0.0 - 6.2.0
        {51, nullptr, "UnregisterNotificationTokenAsync"}, // 1.0.0 - 6.2.0
    };
    RegisterHandlers(functions);
}

ACC_AA::~ACC_AA() = default;

} // namespace Service::Account
18
src/core/hle/service/acc/acc_aa.h
Executable file
@@ -0,0 +1,18 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/service/acc/acc.h"

namespace Service::Account {

class ACC_AA final : public Module::Interface {
public:
    explicit ACC_AA(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> profile_manager,
                    Core::System& system);
    ~ACC_AA() override;
};

} // namespace Service::Account
68
src/core/hle/service/acc/acc_su.cpp
Executable file
@@ -0,0 +1,68 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/service/acc/acc_su.h"

namespace Service::Account {

ACC_SU::ACC_SU(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> profile_manager,
               Core::System& system)
    : Module::Interface(std::move(module), std::move(profile_manager), system, "acc:su") {
    // clang-format off
    static const FunctionInfo functions[] = {
        {0, &ACC_SU::GetUserCount, "GetUserCount"},
        {1, &ACC_SU::GetUserExistence, "GetUserExistence"},
        {2, &ACC_SU::ListAllUsers, "ListAllUsers"},
        {3, &ACC_SU::ListOpenUsers, "ListOpenUsers"},
        {4, &ACC_SU::GetLastOpenedUser, "GetLastOpenedUser"},
        {5, &ACC_SU::GetProfile, "GetProfile"},
        {6, nullptr, "GetProfileDigest"}, // 3.0.0+
        {50, &ACC_SU::IsUserRegistrationRequestPermitted, "IsUserRegistrationRequestPermitted"},
        {51, &ACC_SU::TrySelectUserWithoutInteraction, "TrySelectUserWithoutInteraction"},
        {60, &ACC_SU::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"}, // 5.0.0 - 5.1.0
        {99, nullptr, "DebugActivateOpenContextRetention"}, // 6.0.0+
        {100, nullptr, "GetUserRegistrationNotifier"},
        {101, nullptr, "GetUserStateChangeNotifier"},
        {102, nullptr, "GetBaasAccountManagerForSystemService"},
        {103, nullptr, "GetBaasUserAvailabilityChangeNotifier"},
        {104, nullptr, "GetProfileUpdateNotifier"},
        {105, nullptr, "CheckNetworkServiceAvailabilityAsync"}, // 4.0.0+
        {106, nullptr, "GetProfileSyncNotifier"}, // 9.0.0+
        {110, nullptr, "StoreSaveDataThumbnail"},
        {111, nullptr, "ClearSaveDataThumbnail"},
        {112, nullptr, "LoadSaveDataThumbnail"},
        {113, nullptr, "GetSaveDataThumbnailExistence"}, // 5.0.0+
        {120, nullptr, "ListOpenUsersInApplication"}, // 10.0.0+
        {130, nullptr, "ActivateOpenContextRetention"}, // 6.0.0+
        {140, &ACC_SU::ListQualifiedUsers, "ListQualifiedUsers"}, // 6.0.0+
        {150, nullptr, "AuthenticateApplicationAsync"}, // 10.0.0+
        {190, nullptr, "GetUserLastOpenedApplication"}, // 1.0.0 - 9.2.0
        {191, nullptr, "ActivateOpenContextHolder"}, // 7.0.0+
        {200, nullptr, "BeginUserRegistration"},
        {201, nullptr, "CompleteUserRegistration"},
        {202, nullptr, "CancelUserRegistration"},
        {203, nullptr, "DeleteUser"},
        {204, nullptr, "SetUserPosition"},
        {205, &ACC_SU::GetProfileEditor, "GetProfileEditor"},
        {206, nullptr, "CompleteUserRegistrationForcibly"},
        {210, nullptr, "CreateFloatingRegistrationRequest"}, // 3.0.0+
        {211, nullptr, "CreateProcedureToRegisterUserWithNintendoAccount"}, // 8.0.0+
        {212, nullptr, "ResumeProcedureToRegisterUserWithNintendoAccount"}, // 8.0.0+
        {230, nullptr, "AuthenticateServiceAsync"},
        {250, nullptr, "GetBaasAccountAdministrator"},
        {290, nullptr, "ProxyProcedureForGuestLoginWithNintendoAccount"},
        {291, nullptr, "ProxyProcedureForFloatingRegistrationWithNintendoAccount"}, // 3.0.0+
        {299, nullptr, "SuspendBackgroundDaemon"},
        {997, nullptr, "DebugInvalidateTokenCacheForUser"}, // 3.0.0+
        {998, nullptr, "DebugSetUserStateClose"},
        {999, nullptr, "DebugSetUserStateOpen"},
    };
    // clang-format on

    RegisterHandlers(functions);
}

ACC_SU::~ACC_SU() = default;

} // namespace Service::Account
18
src/core/hle/service/acc/acc_su.h
Executable file
@@ -0,0 +1,18 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/service/acc/acc.h"

namespace Service::Account {

class ACC_SU final : public Module::Interface {
public:
    explicit ACC_SU(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> profile_manager,
                    Core::System& system);
    ~ACC_SU() override;
};

} // namespace Service::Account
45
src/core/hle/service/acc/acc_u0.cpp
Executable file
@@ -0,0 +1,45 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/service/acc/acc_u0.h"

namespace Service::Account {

ACC_U0::ACC_U0(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> profile_manager,
               Core::System& system)
    : Module::Interface(std::move(module), std::move(profile_manager), system, "acc:u0") {
    // clang-format off
    static const FunctionInfo functions[] = {
        {0, &ACC_U0::GetUserCount, "GetUserCount"},
        {1, &ACC_U0::GetUserExistence, "GetUserExistence"},
        {2, &ACC_U0::ListAllUsers, "ListAllUsers"},
        {3, &ACC_U0::ListOpenUsers, "ListOpenUsers"},
        {4, &ACC_U0::GetLastOpenedUser, "GetLastOpenedUser"},
        {5, &ACC_U0::GetProfile, "GetProfile"},
        {6, nullptr, "GetProfileDigest"}, // 3.0.0+
        {50, &ACC_U0::IsUserRegistrationRequestPermitted, "IsUserRegistrationRequestPermitted"},
        {51, &ACC_U0::TrySelectUserWithoutInteraction, "TrySelectUserWithoutInteraction"},
        {60, &ACC_U0::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"}, // 5.0.0 - 5.1.0
        {99, nullptr, "DebugActivateOpenContextRetention"}, // 6.0.0+
        {100, &ACC_U0::InitializeApplicationInfo, "InitializeApplicationInfo"},
        {101, &ACC_U0::GetBaasAccountManagerForApplication, "GetBaasAccountManagerForApplication"},
        {102, nullptr, "AuthenticateApplicationAsync"},
        {103, nullptr, "CheckNetworkServiceAvailabilityAsync"}, // 4.0.0+
        {110, nullptr, "StoreSaveDataThumbnail"},
        {111, nullptr, "ClearSaveDataThumbnail"},
        {120, nullptr, "CreateGuestLoginRequest"},
        {130, &ACC_U0::LoadOpenContext, "LoadOpenContext"}, // 5.0.0+
        {131, &ACC_U0::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"}, // 6.0.0+
        {140, &ACC_U0::InitializeApplicationInfoRestricted, "InitializeApplicationInfoRestricted"}, // 6.0.0+
        {141, &ACC_U0::ListQualifiedUsers, "ListQualifiedUsers"}, // 6.0.0+
        {150, &ACC_U0::IsUserAccountSwitchLocked, "IsUserAccountSwitchLocked"}, // 6.0.0+
    };
    // clang-format on

    RegisterHandlers(functions);
}

ACC_U0::~ACC_U0() = default;

} // namespace Service::Account
18
src/core/hle/service/acc/acc_u0.h
Executable file
@@ -0,0 +1,18 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/service/acc/acc.h"

namespace Service::Account {

class ACC_U0 final : public Module::Interface {
public:
    explicit ACC_U0(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> profile_manager,
                    Core::System& system);
    ~ACC_U0() override;
};

} // namespace Service::Account
53
src/core/hle/service/acc/acc_u1.cpp
Executable file
@@ -0,0 +1,53 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/service/acc/acc_u1.h"

namespace Service::Account {

ACC_U1::ACC_U1(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> profile_manager,
               Core::System& system)
    : Module::Interface(std::move(module), std::move(profile_manager), system, "acc:u1") {
    // clang-format off
    static const FunctionInfo functions[] = {
        {0, &ACC_U1::GetUserCount, "GetUserCount"},
        {1, &ACC_U1::GetUserExistence, "GetUserExistence"},
        {2, &ACC_U1::ListAllUsers, "ListAllUsers"},
        {3, &ACC_U1::ListOpenUsers, "ListOpenUsers"},
        {4, &ACC_U1::GetLastOpenedUser, "GetLastOpenedUser"},
        {5, &ACC_U1::GetProfile, "GetProfile"},
        {6, nullptr, "GetProfileDigest"}, // 3.0.0+
        {50, &ACC_U1::IsUserRegistrationRequestPermitted, "IsUserRegistrationRequestPermitted"},
        {51, &ACC_U1::TrySelectUserWithoutInteraction, "TrySelectUserWithoutInteraction"},
        {60, &ACC_U1::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"}, // 5.0.0 - 5.1.0
        {99, nullptr, "DebugActivateOpenContextRetention"}, // 6.0.0+
        {100, nullptr, "GetUserRegistrationNotifier"},
        {101, nullptr, "GetUserStateChangeNotifier"},
        {102, nullptr, "GetBaasAccountManagerForSystemService"},
        {103, nullptr, "GetBaasUserAvailabilityChangeNotifier"},
        {104, nullptr, "GetProfileUpdateNotifier"},
        {105, nullptr, "CheckNetworkServiceAvailabilityAsync"}, // 4.0.0+
        {106, nullptr, "GetProfileSyncNotifier"}, // 9.0.0+
        {110, nullptr, "StoreSaveDataThumbnail"},
        {111, nullptr, "ClearSaveDataThumbnail"},
        {112, nullptr, "LoadSaveDataThumbnail"},
        {113, nullptr, "GetSaveDataThumbnailExistence"}, // 5.0.0+
        {120, nullptr, "ListOpenUsersInApplication"}, // 10.0.0+
        {130, nullptr, "ActivateOpenContextRetention"}, // 6.0.0+
        {140, &ACC_U1::ListQualifiedUsers, "ListQualifiedUsers"}, // 6.0.0+
        {150, nullptr, "AuthenticateApplicationAsync"}, // 10.0.0+
        {190, nullptr, "GetUserLastOpenedApplication"}, // 1.0.0 - 9.2.0
        {191, nullptr, "ActivateOpenContextHolder"}, // 7.0.0+
        {997, nullptr, "DebugInvalidateTokenCacheForUser"}, // 3.0.0+
        {998, nullptr, "DebugSetUserStateClose"},
        {999, nullptr, "DebugSetUserStateOpen"},
    };
    // clang-format on

    RegisterHandlers(functions);
}

ACC_U1::~ACC_U1() = default;

} // namespace Service::Account
18
src/core/hle/service/acc/acc_u1.h
Executable file
@@ -0,0 +1,18 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/service/acc/acc.h"

namespace Service::Account {

class ACC_U1 final : public Module::Interface {
public:
    explicit ACC_U1(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> profile_manager,
                    Core::System& system);
    ~ACC_U1() override;
};

} // namespace Service::Account
14
src/core/hle/service/acc/errors.h
Executable file
@@ -0,0 +1,14 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/result.h"

namespace Service::Account {

constexpr ResultCode ERR_ACCOUNTINFO_BAD_APPLICATION{ErrorModule::Account, 22};
constexpr ResultCode ERR_ACCOUNTINFO_ALREADY_INITIALIZED{ErrorModule::Account, 41};

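// If ErrorModule::Account maps to module 124 as on hardware, these surface to
// the user as 2124-0022 and 2124-0041 respectively.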
} // namespace Service::Account
396
src/core/hle/service/acc/profile_manager.cpp
Executable file
@@ -0,0 +1,396 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <cstring>
#include <random>

#include <fmt/format.h>

#include "common/file_util.h"
#include "core/hle/service/acc/profile_manager.h"
#include "core/settings.h"

namespace Service::Account {

namespace FS = Common::FS;
using Common::UUID;

struct UserRaw {
    UUID uuid{Common::INVALID_UUID};
    UUID uuid2{Common::INVALID_UUID};
    u64 timestamp{};
    ProfileUsername username{};
    ProfileData extra_data{};
};
static_assert(sizeof(UserRaw) == 0xC8, "UserRaw has incorrect size.");

struct ProfileDataRaw {
    INSERT_PADDING_BYTES(0x10);
    std::array<UserRaw, MAX_USERS> users{};
};
static_assert(sizeof(ProfileDataRaw) == 0x650, "ProfileDataRaw has incorrect size.");

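// On-disk layout implied by the asserts above: 0x10 bytes of leading padding
// followed by MAX_USERS (8) UserRaw entries of 0xC8 bytes each, 0x650 in total,
// matching the profiles.dat blob in the system save.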
// TODO(ogniK): Get actual error codes
constexpr ResultCode ERROR_TOO_MANY_USERS(ErrorModule::Account, u32(-1));
constexpr ResultCode ERROR_USER_ALREADY_EXISTS(ErrorModule::Account, u32(-2));
constexpr ResultCode ERROR_ARGUMENT_IS_NULL(ErrorModule::Account, 20);

constexpr char ACC_SAVE_AVATORS_BASE_PATH[] = "/system/save/8000000000000010/su/avators/";

ProfileManager::ProfileManager() {
    ParseUserSaveFile();

    if (user_count == 0)
        CreateNewUser(UUID::Generate(), "yuzu");

    // Fall back to the first profile if the configured index doesn't exist
    auto current = std::clamp<int>(Settings::values.current_user, 0, MAX_USERS - 1);
    if (!UserExistsIndex(current))
        current = 0;

    OpenUser(*GetUser(current));
}

ProfileManager::~ProfileManager() {
    WriteUserSaveFile();
}

/// After a user's creation, it needs to be "registered" with the system.
/// AddToProfiles handles the internal bookkeeping of the profile array.
std::optional<std::size_t> ProfileManager::AddToProfiles(const ProfileInfo& profile) {
    if (user_count >= MAX_USERS) {
        return std::nullopt;
    }
    profiles[user_count] = profile;
    return user_count++;
}

/// Deletes a specific profile based on its profile index
bool ProfileManager::RemoveProfileAtIndex(std::size_t index) {
    if (index >= MAX_USERS || index >= user_count) {
        return false;
    }
    if (index < user_count - 1) {
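        // std::rotate shifts the later profiles left by one slot so the array
        // stays densely packed; the vacated last slot is cleared below.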
        std::rotate(profiles.begin() + index, profiles.begin() + index + 1, profiles.end());
    }
    profiles.back() = {};
    user_count--;
    return true;
}

/// Helper function to register a user with the system
ResultCode ProfileManager::AddUser(const ProfileInfo& user) {
    if (!AddToProfiles(user)) {
        return ERROR_TOO_MANY_USERS;
    }
    return RESULT_SUCCESS;
}

/// Creates a new user on the system. If the uuid of the user already exists,
/// the user is not created.
ResultCode ProfileManager::CreateNewUser(UUID uuid, const ProfileUsername& username) {
    if (user_count == MAX_USERS) {
        return ERROR_TOO_MANY_USERS;
    }
    if (!uuid) {
        return ERROR_ARGUMENT_IS_NULL;
    }
    if (username[0] == 0x0) {
        return ERROR_ARGUMENT_IS_NULL;
    }
    if (std::any_of(profiles.begin(), profiles.end(),
                    [&uuid](const ProfileInfo& profile) { return uuid == profile.user_uuid; })) {
        return ERROR_USER_ALREADY_EXISTS;
    }

    return AddUser({
        .user_uuid = uuid,
        .username = username,
        .creation_time = 0,
        .data = {},
        .is_open = false,
    });
}

/// Creates a new user on the system. This overload takes a std::string for the
/// username, which simplifies registration when the name is loaded straight
/// from the config.
ResultCode ProfileManager::CreateNewUser(UUID uuid, const std::string& username) {
    ProfileUsername username_output{};

    if (username.size() > username_output.size()) {
        std::copy_n(username.begin(), username_output.size(), username_output.begin());
    } else {
        std::copy(username.begin(), username.end(), username_output.begin());
    }
    return CreateNewUser(uuid, username_output);
}

std::optional<UUID> ProfileManager::GetUser(std::size_t index) const {
    if (index >= MAX_USERS) {
        return std::nullopt;
    }

    return profiles[index].user_uuid;
}

/// Returns a user's profile index based on their user id
std::optional<std::size_t> ProfileManager::GetUserIndex(const UUID& uuid) const {
    if (!uuid) {
        return std::nullopt;
    }

    const auto iter = std::find_if(profiles.begin(), profiles.end(),
                                   [&uuid](const ProfileInfo& p) { return p.user_uuid == uuid; });
    if (iter == profiles.end()) {
        return std::nullopt;
    }

    return static_cast<std::size_t>(std::distance(profiles.begin(), iter));
}

/// Returns a user's profile index based on their profile
std::optional<std::size_t> ProfileManager::GetUserIndex(const ProfileInfo& user) const {
    return GetUserIndex(user.user_uuid);
}

/// Returns the data structure used by the Switch when GetProfileBase is called on acc:*
bool ProfileManager::GetProfileBase(std::optional<std::size_t> index, ProfileBase& profile) const {
    if (!index || index >= MAX_USERS) {
        return false;
    }
    const auto& prof_info = profiles[*index];
    profile.user_uuid = prof_info.user_uuid;
    profile.username = prof_info.username;
    profile.timestamp = prof_info.creation_time;
    return true;
}

/// Returns the data structure used by the Switch when GetProfileBase is called on acc:*
bool ProfileManager::GetProfileBase(UUID uuid, ProfileBase& profile) const {
    const auto idx = GetUserIndex(uuid);
    return GetProfileBase(idx, profile);
}

/// Returns the data structure used by the Switch when GetProfileBase is called on acc:*
bool ProfileManager::GetProfileBase(const ProfileInfo& user, ProfileBase& profile) const {
    return GetProfileBase(user.user_uuid, profile);
}

/// Returns the current user count on the system. We keep a count variable so we
/// don't have to loop over the internal profile array on every call.
std::size_t ProfileManager::GetUserCount() const {
    return user_count;
}

/// Lists the currently "opened" users on the system. Users are typically not
/// open until they sign into something or pick a profile. For now, all users
/// are treated as open until we emulate qlaunch.
std::size_t ProfileManager::GetOpenUserCount() const {
    return std::count_if(profiles.begin(), profiles.end(),
                         [](const ProfileInfo& p) { return p.is_open; });
}

/// Checks if a user id exists in our profile manager
bool ProfileManager::UserExists(UUID uuid) const {
    return GetUserIndex(uuid).has_value();
}

bool ProfileManager::UserExistsIndex(std::size_t index) const {
    if (index >= MAX_USERS)
        return false;
    return profiles[index].user_uuid.uuid != Common::INVALID_UUID;
}

/// Opens a specific user
void ProfileManager::OpenUser(UUID uuid) {
    const auto idx = GetUserIndex(uuid);
    if (!idx) {
        return;
    }

    profiles[*idx].is_open = true;
    last_opened_user = uuid;
}

/// Closes a specific user
void ProfileManager::CloseUser(UUID uuid) {
    const auto idx = GetUserIndex(uuid);
    if (!idx) {
        return;
    }

    profiles[*idx].is_open = false;
}

/// Gets all valid user ids on the system
UserIDArray ProfileManager::GetAllUsers() const {
    UserIDArray output;
    std::transform(profiles.begin(), profiles.end(), output.begin(),
                   [](const ProfileInfo& p) { return p.user_uuid; });
    return output;
}

/// Gets all open users on the system and zeroes out the remaining entries. This
/// is needed for GetOpenUsers, where the rest of the output buffer must be zeroed.
UserIDArray ProfileManager::GetOpenUsers() const {
    UserIDArray output;
    std::transform(profiles.begin(), profiles.end(), output.begin(), [](const ProfileInfo& p) {
        if (p.is_open)
            return p.user_uuid;
        return UUID{Common::INVALID_UUID};
    });
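    // stable_partition moves the valid UUIDs to the front while preserving their
    // relative order, leaving the INVALID_UUID placeholders at the tail.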
    std::stable_partition(output.begin(), output.end(), [](const UUID& uuid) { return uuid; });
    return output;
}

/// Returns the last user which was opened
UUID ProfileManager::GetLastOpenedUser() const {
    return last_opened_user;
}

/// Returns the user's profile base and the (still unknown) arbitrary data.
bool ProfileManager::GetProfileBaseAndData(std::optional<std::size_t> index, ProfileBase& profile,
                                           ProfileData& data) const {
    if (GetProfileBase(index, profile)) {
        data = profiles[*index].data;
        return true;
    }
    return false;
}

/// Returns the user's profile base and the (still unknown) arbitrary data.
bool ProfileManager::GetProfileBaseAndData(UUID uuid, ProfileBase& profile,
                                           ProfileData& data) const {
    const auto idx = GetUserIndex(uuid);
    return GetProfileBaseAndData(idx, profile, data);
}

/// Returns the user's profile base and the (still unknown) arbitrary data.
bool ProfileManager::GetProfileBaseAndData(const ProfileInfo& user, ProfileBase& profile,
                                           ProfileData& data) const {
    return GetProfileBaseAndData(user.user_uuid, profile, data);
}

/// Returns whether the system currently allows user registration
bool ProfileManager::CanSystemRegisterUser() const {
    // TODO(ogniK): Games shouldn't have access to user registration; update this
    // to change dynamically once we emulate qlaunch.
    return false;
}

bool ProfileManager::RemoveUser(UUID uuid) {
    const auto index = GetUserIndex(uuid);
    if (!index) {
        return false;
    }

    profiles[*index] = ProfileInfo{};
    std::stable_partition(profiles.begin(), profiles.end(),
                          [](const ProfileInfo& profile) { return profile.user_uuid; });
    return true;
}

bool ProfileManager::SetProfileBase(UUID uuid, const ProfileBase& profile_new) {
    const auto index = GetUserIndex(uuid);
    if (!index || profile_new.user_uuid == UUID(Common::INVALID_UUID)) {
        return false;
    }

    auto& profile = profiles[*index];
    profile.user_uuid = profile_new.user_uuid;
    profile.username = profile_new.username;
    profile.creation_time = profile_new.timestamp;

    return true;
}

bool ProfileManager::SetProfileBaseAndData(Common::UUID uuid, const ProfileBase& profile_new,
                                           const ProfileData& data_new) {
    const auto index = GetUserIndex(uuid);
    if (index.has_value() && SetProfileBase(uuid, profile_new)) {
        profiles[*index].data = data_new;
        return true;
    }

    return false;
}

void ProfileManager::ParseUserSaveFile() {
    const FS::IOFile save(
        FS::GetUserPath(FS::UserPath::NANDDir) + ACC_SAVE_AVATORS_BASE_PATH + "profiles.dat", "rb");

    if (!save.IsOpen()) {
        LOG_WARNING(Service_ACC, "Failed to load profile data from save data... Generating new "
                                 "user 'yuzu' with random UUID.");
        return;
    }

    ProfileDataRaw data;
    if (save.ReadBytes(&data, sizeof(ProfileDataRaw)) != sizeof(ProfileDataRaw)) {
        LOG_WARNING(Service_ACC, "profiles.dat is smaller than expected... Generating new user "
                                 "'yuzu' with random UUID.");
        return;
    }

    for (const auto& user : data.users) {
        if (user.uuid == UUID(Common::INVALID_UUID)) {
            continue;
        }

        AddUser({
            .user_uuid = user.uuid,
            .username = user.username,
            .creation_time = user.timestamp,
            .data = user.extra_data,
            .is_open = false,
        });
    }

    std::stable_partition(profiles.begin(), profiles.end(),
                          [](const ProfileInfo& profile) { return profile.user_uuid; });
}

void ProfileManager::WriteUserSaveFile() {
    ProfileDataRaw raw{};

    for (std::size_t i = 0; i < MAX_USERS; ++i) {
        raw.users[i] = {
            .uuid = profiles[i].user_uuid,
            .uuid2 = profiles[i].user_uuid,
            .timestamp = profiles[i].creation_time,
            .username = profiles[i].username,
            .extra_data = profiles[i].data,
        };
    }

    const auto raw_path = FS::GetUserPath(FS::UserPath::NANDDir) + "/system/save/8000000000000010";
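    // Presumably guards against an earlier state where 8000000000000010 exists
    // as a plain file: a stray file at this path would block CreateFullPath below.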
    if (FS::Exists(raw_path) && !FS::IsDirectory(raw_path)) {
        FS::Delete(raw_path);
    }

    const auto path =
        FS::GetUserPath(FS::UserPath::NANDDir) + ACC_SAVE_AVATORS_BASE_PATH + "profiles.dat";

    if (!FS::CreateFullPath(path)) {
        LOG_WARNING(Service_ACC, "Failed to create full path of profiles.dat. Create the directory "
                                 "nand/system/save/8000000000000010/su/avators to mitigate this "
                                 "issue.");
        return;
    }

    FS::IOFile save(path, "wb");

    if (!save.IsOpen()) {
        LOG_WARNING(Service_ACC, "Failed to write save data to file... No changes to user data "
                                 "made in current session will be saved.");
        return;
    }

    save.Resize(sizeof(ProfileDataRaw));
    save.WriteBytes(&raw, sizeof(ProfileDataRaw));
}

} // namespace Service::Account
109
src/core/hle/service/acc/profile_manager.h
Executable file
@@ -0,0 +1,109 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <optional>

#include "common/common_types.h"
#include "common/swap.h"
#include "common/uuid.h"
#include "core/hle/result.h"

namespace Service::Account {

constexpr std::size_t MAX_USERS{8};
constexpr std::size_t profile_username_size{32};

using ProfileUsername = std::array<u8, profile_username_size>;
using UserIDArray = std::array<Common::UUID, MAX_USERS>;

/// Contains extra data related to a user.
/// TODO: RE this structure
struct ProfileData {
    INSERT_PADDING_WORDS(1);
    u32 icon_id{};
    u8 bg_color_id{};
    INSERT_PADDING_BYTES(0x7);
    INSERT_PADDING_BYTES(0x10);
    INSERT_PADDING_BYTES(0x60);
};
static_assert(sizeof(ProfileData) == 0x80, "ProfileData structure has incorrect size");

/// Holds general information about a user's profile. This is where we store all
/// the information tied to a specific user.
struct ProfileInfo {
    Common::UUID user_uuid{Common::INVALID_UUID};
    ProfileUsername username{};
    u64 creation_time{};
    ProfileData data{}; // TODO(ognik): Work out what this is
    bool is_open{};
};

struct ProfileBase {
    Common::UUID user_uuid{Common::INVALID_UUID};
    u64_le timestamp{};
    ProfileUsername username{};

    // Zero out all fields so the profile slot is considered "empty"
    void Invalidate() {
        user_uuid.Invalidate();
        timestamp = 0;
        username.fill(0);
    }
};
static_assert(sizeof(ProfileBase) == 0x38, "ProfileBase is an invalid size");

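// Size check (assuming Common::UUID is 0x10 bytes): 0x10 uuid + 0x8 timestamp
// + 0x20 username = 0x38, matching the ProfileBase blob games read over IPC.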
/// The profile manager handles multiple user profiles at once. It keeps track of
/// open users and all accounts registered on the "system", and hands out
/// individual ProfileInfo objects on request.
class ProfileManager {
public:
    ProfileManager();
    ~ProfileManager();

    ResultCode AddUser(const ProfileInfo& user);
    ResultCode CreateNewUser(Common::UUID uuid, const ProfileUsername& username);
    ResultCode CreateNewUser(Common::UUID uuid, const std::string& username);
    std::optional<Common::UUID> GetUser(std::size_t index) const;
    std::optional<std::size_t> GetUserIndex(const Common::UUID& uuid) const;
    std::optional<std::size_t> GetUserIndex(const ProfileInfo& user) const;
    bool GetProfileBase(std::optional<std::size_t> index, ProfileBase& profile) const;
    bool GetProfileBase(Common::UUID uuid, ProfileBase& profile) const;
    bool GetProfileBase(const ProfileInfo& user, ProfileBase& profile) const;
    bool GetProfileBaseAndData(std::optional<std::size_t> index, ProfileBase& profile,
                               ProfileData& data) const;
    bool GetProfileBaseAndData(Common::UUID uuid, ProfileBase& profile, ProfileData& data) const;
    bool GetProfileBaseAndData(const ProfileInfo& user, ProfileBase& profile,
                               ProfileData& data) const;
    std::size_t GetUserCount() const;
    std::size_t GetOpenUserCount() const;
    bool UserExists(Common::UUID uuid) const;
    bool UserExistsIndex(std::size_t index) const;
    void OpenUser(Common::UUID uuid);
    void CloseUser(Common::UUID uuid);
    UserIDArray GetOpenUsers() const;
    UserIDArray GetAllUsers() const;
    Common::UUID GetLastOpenedUser() const;

    bool CanSystemRegisterUser() const;

    bool RemoveUser(Common::UUID uuid);
    bool SetProfileBase(Common::UUID uuid, const ProfileBase& profile_new);
    bool SetProfileBaseAndData(Common::UUID uuid, const ProfileBase& profile_new,
                               const ProfileData& data_new);

private:
    void ParseUserSaveFile();
    void WriteUserSaveFile();
    std::optional<std::size_t> AddToProfiles(const ProfileInfo& profile);
    bool RemoveProfileAtIndex(std::size_t index);

    std::array<ProfileInfo, MAX_USERS> profiles{};
    std::size_t user_count{};
    Common::UUID last_opened_user{Common::INVALID_UUID};
};

} // namespace Service::Account