early-access version 1255

This commit is contained in:
pineappleEA
2020-12-28 15:15:37 +00:00
parent 84b39492d1
commit 78b48028e1
6254 changed files with 1868140 additions and 0 deletions

View File

@@ -0,0 +1,117 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
#include <array>
#include <limits>
#include "common/assert.h"
#include "core/hle/kernel/memory/address_space_info.h"
namespace Kernel::Memory {
namespace {
enum : u64 {
Size_1_MB = 0x100000,
Size_2_MB = 2 * Size_1_MB,
Size_128_MB = 128 * Size_1_MB,
Size_1_GB = 0x40000000,
Size_2_GB = 2 * Size_1_GB,
Size_4_GB = 4 * Size_1_GB,
Size_6_GB = 6 * Size_1_GB,
Size_64_GB = 64 * Size_1_GB,
Size_512_GB = 512 * Size_1_GB,
Invalid = std::numeric_limits<u64>::max(),
};
// clang-format off
constexpr std::array<AddressSpaceInfo, 13> AddressSpaceInfos{{
{ .bit_width = 32, .address = Size_2_MB , .size = Size_1_GB - Size_2_MB , .type = AddressSpaceInfo::Type::Is32Bit, },
{ .bit_width = 32, .address = Size_1_GB , .size = Size_4_GB - Size_1_GB , .type = AddressSpaceInfo::Type::Small64Bit, },
{ .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = AddressSpaceInfo::Type::Heap, },
{ .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = AddressSpaceInfo::Type::Alias, },
{ .bit_width = 36, .address = Size_128_MB, .size = Size_2_GB - Size_128_MB, .type = AddressSpaceInfo::Type::Is32Bit, },
{ .bit_width = 36, .address = Size_2_GB , .size = Size_64_GB - Size_2_GB , .type = AddressSpaceInfo::Type::Small64Bit, },
{ .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = AddressSpaceInfo::Type::Heap, },
{ .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = AddressSpaceInfo::Type::Alias, },
{ .bit_width = 39, .address = Size_128_MB, .size = Size_512_GB - Size_128_MB, .type = AddressSpaceInfo::Type::Large64Bit, },
{ .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = AddressSpaceInfo::Type::Is32Bit },
{ .bit_width = 39, .address = Invalid , .size = Size_6_GB , .type = AddressSpaceInfo::Type::Heap, },
{ .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = AddressSpaceInfo::Type::Alias, },
{ .bit_width = 39, .address = Invalid , .size = Size_2_GB , .type = AddressSpaceInfo::Type::Stack, },
}};
// clang-format on
constexpr bool IsAllowedIndexForAddress(std::size_t index) {
return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Invalid;
}
using IndexArray = std::array<std::size_t, static_cast<std::size_t>(AddressSpaceInfo::Type::Count)>;
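// Each array below maps an AddressSpaceInfo::Type (used as the index) to a row of AddressSpaceInfos
// for the corresponding address-space width.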
constexpr IndexArray AddressSpaceIndices32Bit{
0, 1, 0, 2, 0, 3,
};
constexpr IndexArray AddressSpaceIndices36Bit{
4, 5, 4, 6, 4, 7,
};
constexpr IndexArray AddressSpaceIndices39Bit{
9, 8, 8, 10, 12, 11,
};
constexpr bool IsAllowed32BitType(AddressSpaceInfo::Type type) {
return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
type != AddressSpaceInfo::Type::Stack;
}
constexpr bool IsAllowed36BitType(AddressSpaceInfo::Type type) {
return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
type != AddressSpaceInfo::Type::Stack;
}
constexpr bool IsAllowed39BitType(AddressSpaceInfo::Type type) {
return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Small64Bit;
}
} // namespace
u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
const std::size_t index{static_cast<std::size_t>(type)};
switch (width) {
case 32:
ASSERT(IsAllowed32BitType(type));
ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices32Bit[index]));
return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].address;
case 36:
ASSERT(IsAllowed36BitType(type));
ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices36Bit[index]));
return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].address;
case 39:
ASSERT(IsAllowed39BitType(type));
ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
}
UNREACHABLE();
}
std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
const std::size_t index{static_cast<std::size_t>(type)};
switch (width) {
case 32:
ASSERT(IsAllowed32BitType(type));
return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].size;
case 36:
ASSERT(IsAllowed36BitType(type));
return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].size;
case 39:
ASSERT(IsAllowed39BitType(type));
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
}
UNREACHABLE();
}
} // namespace Kernel::Memory
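// Usage sketch: for a 39-bit process, Type::Large64Bit maps through AddressSpaceIndices39Bit[2] == 8,
// so GetAddressSpaceStart(39, Type::Large64Bit) yields Size_128_MB and GetAddressSpaceSize yields
// Size_512_GB - Size_128_MB. Rows whose address is Invalid (such as Heap at this width) only support
// size queries; asking for their start would trip the IsAllowedIndexForAddress assert.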

View File

@@ -0,0 +1,34 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
#pragma once
#include "common/common_types.h"
namespace Kernel::Memory {
struct AddressSpaceInfo final {
enum class Type : u32 {
Is32Bit = 0,
Small64Bit = 1,
Large64Bit = 2,
Heap = 3,
Stack = 4,
Alias = 5,
Count,
};
static u64 GetAddressSpaceStart(std::size_t width, Type type);
static std::size_t GetAddressSpaceSize(std::size_t width, Type type);
const std::size_t bit_width{};
const std::size_t address{};
const std::size_t size{};
const Type type{};
};
} // namespace Kernel::Memory

View File

@@ -0,0 +1,335 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
#pragma once
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/kernel/svc_types.h"
namespace Kernel::Memory {
enum class MemoryState : u32 {
None = 0,
Mask = 0xFF,
All = ~None,
FlagCanReprotect = (1 << 8),
FlagCanDebug = (1 << 9),
FlagCanUseIpc = (1 << 10),
FlagCanUseNonDeviceIpc = (1 << 11),
FlagCanUseNonSecureIpc = (1 << 12),
FlagMapped = (1 << 13),
FlagCode = (1 << 14),
FlagCanAlias = (1 << 15),
FlagCanCodeAlias = (1 << 16),
FlagCanTransfer = (1 << 17),
FlagCanQueryPhysical = (1 << 18),
FlagCanDeviceMap = (1 << 19),
FlagCanAlignedDeviceMap = (1 << 20),
FlagCanIpcUserBuffer = (1 << 21),
FlagReferenceCounted = (1 << 22),
FlagCanMapProcess = (1 << 23),
FlagCanChangeAttribute = (1 << 24),
FlagCanCodeMemory = (1 << 25),
FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer |
FlagReferenceCounted | FlagCanChangeAttribute,
FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap |
FlagCanAlignedDeviceMap | FlagReferenceCounted,
FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap,
Free = static_cast<u32>(Svc::MemoryState::Free),
Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped,
Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
FlagCanCodeMemory,
Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted,
Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
FlagCanCodeAlias,
AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData |
FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory,
Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap |
FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
ThreadLocal =
static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,
Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc |
FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible),
NonSecureIpc = static_cast<u32>(Svc::MemoryState::NonSecureIpc) | FlagsMisc |
FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
NonDeviceIpc =
static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc,
Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,
GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
FlagReferenceCounted | FlagCanDebug,
CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted,
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryState);
static_assert(static_cast<u32>(MemoryState::Free) == 0x00000000);
static_assert(static_cast<u32>(MemoryState::Io) == 0x00002001);
static_assert(static_cast<u32>(MemoryState::Static) == 0x00042002);
static_assert(static_cast<u32>(MemoryState::Code) == 0x00DC7E03);
static_assert(static_cast<u32>(MemoryState::CodeData) == 0x03FEBD04);
static_assert(static_cast<u32>(MemoryState::Normal) == 0x037EBD05);
static_assert(static_cast<u32>(MemoryState::Shared) == 0x00402006);
static_assert(static_cast<u32>(MemoryState::AliasCode) == 0x00DD7E08);
static_assert(static_cast<u32>(MemoryState::AliasCodeData) == 0x03FFBD09);
static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A);
static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B);
static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C);
static_assert(static_cast<u32>(MemoryState::Transfered) == 0x015C3C0D);
static_assert(static_cast<u32>(MemoryState::SharedTransfered) == 0x005C380E);
static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F);
static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811);
static_assert(static_cast<u32>(MemoryState::NonDeviceIpc) == 0x004C2812);
static_assert(static_cast<u32>(MemoryState::Kernel) == 0x00002013);
static_assert(static_cast<u32>(MemoryState::GeneratedCode) == 0x00402214);
static_assert(static_cast<u32>(MemoryState::CodeOut) == 0x00402015);
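// Each state above packs its Svc::MemoryState id into the low byte and ORs in the capability flags,
// e.g. Io == 0x01 | FlagMapped (1 << 13) == 0x00002001, which the corresponding assert verifies.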
enum class MemoryPermission : u8 {
None = 0,
Mask = static_cast<u8>(~None),
Read = 1 << 0,
Write = 1 << 1,
Execute = 1 << 2,
ReadAndWrite = Read | Write,
ReadAndExecute = Read | Execute,
UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
Svc::MemoryPermission::Execute),
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission);
enum class MemoryAttribute : u8 {
None = 0x00,
Mask = 0x7F,
All = Mask,
DontCareMask = 0x80,
Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared),
Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
IpcAndDeviceMapped = IpcLocked | DeviceShared,
LockedAndIpcLocked = Locked | IpcLocked,
DeviceSharedAndUncached = DeviceShared | Uncached
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);
static_assert((static_cast<u8>(MemoryAttribute::Mask) &
static_cast<u8>(MemoryAttribute::DontCareMask)) == 0);
struct MemoryInfo {
VAddr addr{};
std::size_t size{};
MemoryState state{};
MemoryPermission perm{};
MemoryAttribute attribute{};
MemoryPermission original_perm{};
u16 ipc_lock_count{};
u16 device_use_count{};
constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
return {
addr,
size,
static_cast<Svc::MemoryState>(state & MemoryState::Mask),
static_cast<Svc::MemoryAttribute>(attribute & MemoryAttribute::Mask),
static_cast<Svc::MemoryPermission>(perm & MemoryPermission::UserMask),
ipc_lock_count,
device_use_count,
};
}
constexpr VAddr GetAddress() const {
return addr;
}
constexpr std::size_t GetSize() const {
return size;
}
constexpr std::size_t GetNumPages() const {
return GetSize() / PageSize;
}
constexpr VAddr GetEndAddress() const {
return GetAddress() + GetSize();
}
constexpr VAddr GetLastAddress() const {
return GetEndAddress() - 1;
}
};
class MemoryBlock final {
friend class MemoryBlockManager;
private:
VAddr addr{};
std::size_t num_pages{};
MemoryState state{MemoryState::None};
u16 ipc_lock_count{};
u16 device_use_count{};
MemoryPermission perm{MemoryPermission::None};
MemoryPermission original_perm{MemoryPermission::None};
MemoryAttribute attribute{MemoryAttribute::None};
public:
static constexpr int Compare(const MemoryBlock& lhs, const MemoryBlock& rhs) {
if (lhs.GetAddress() < rhs.GetAddress()) {
return -1;
} else if (lhs.GetAddress() <= rhs.GetLastAddress()) {
return 0;
} else {
return 1;
}
}
public:
constexpr MemoryBlock() = default;
constexpr MemoryBlock(VAddr addr_, std::size_t num_pages_, MemoryState state_,
MemoryPermission perm_, MemoryAttribute attribute_)
: addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}
constexpr VAddr GetAddress() const {
return addr;
}
constexpr std::size_t GetNumPages() const {
return num_pages;
}
constexpr std::size_t GetSize() const {
return GetNumPages() * PageSize;
}
constexpr VAddr GetEndAddress() const {
return GetAddress() + GetSize();
}
constexpr VAddr GetLastAddress() const {
return GetEndAddress() - 1;
}
constexpr MemoryInfo GetMemoryInfo() const {
return {
GetAddress(), GetSize(), state, perm,
attribute, original_perm, ipc_lock_count, device_use_count,
};
}
void ShareToDevice(MemoryPermission /*new_perm*/) {
ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared ||
device_use_count == 0);
attribute |= MemoryAttribute::DeviceShared;
const u16 new_use_count{++device_use_count};
ASSERT(new_use_count > 0);
}
void UnshareToDevice(MemoryPermission /*new_perm*/) {
ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared);
const u16 prev_use_count{device_use_count--};
ASSERT(prev_use_count > 0);
if (prev_use_count == 1) {
attribute &= ~MemoryAttribute::DeviceShared;
}
}
private:
constexpr bool HasProperties(MemoryState s, MemoryPermission p, MemoryAttribute a) const {
constexpr MemoryAttribute AttributeIgnoreMask{MemoryAttribute::DontCareMask |
MemoryAttribute::IpcLocked |
MemoryAttribute::DeviceShared};
return state == s && perm == p &&
(attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
}
constexpr bool HasSameProperties(const MemoryBlock& rhs) const {
return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
device_use_count == rhs.device_use_count;
}
constexpr bool Contains(VAddr start) const {
return GetAddress() <= start && start <= GetEndAddress();
}
constexpr void Add(std::size_t count) {
ASSERT(count > 0);
ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1);
num_pages += count;
}
constexpr void Update(MemoryState new_state, MemoryPermission new_perm,
MemoryAttribute new_attribute) {
ASSERT(original_perm == MemoryPermission::None);
ASSERT((attribute & MemoryAttribute::IpcLocked) == MemoryAttribute::None);
state = new_state;
perm = new_perm;
attribute = static_cast<MemoryAttribute>(
new_attribute |
(attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared)));
}
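// Splits this block at split_addr: the returned block covers [GetAddress(), split_addr) and this
// block is shrunk to begin at split_addr.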
constexpr MemoryBlock Split(VAddr split_addr) {
ASSERT(GetAddress() < split_addr);
ASSERT(Contains(split_addr));
ASSERT(Common::IsAligned(split_addr, PageSize));
MemoryBlock block;
block.addr = addr;
block.num_pages = (split_addr - GetAddress()) / PageSize;
block.state = state;
block.ipc_lock_count = ipc_lock_count;
block.device_use_count = device_use_count;
block.perm = perm;
block.original_perm = original_perm;
block.attribute = attribute;
addr = split_addr;
num_pages -= block.num_pages;
return block;
}
};
static_assert(std::is_trivially_destructible<MemoryBlock>::value);
} // namespace Kernel::Memory

View File

@@ -0,0 +1,223 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/memory/memory_block_manager.h"
#include "core/hle/kernel/memory/memory_types.h"
namespace Kernel::Memory {
MemoryBlockManager::MemoryBlockManager(VAddr start_addr, VAddr end_addr)
: start_addr{start_addr}, end_addr{end_addr} {
const u64 num_pages{(end_addr - start_addr) / PageSize};
memory_block_tree.emplace_back(start_addr, num_pages, MemoryState::Free, MemoryPermission::None,
MemoryAttribute::None);
}
MemoryBlockManager::iterator MemoryBlockManager::FindIterator(VAddr addr) {
auto node{memory_block_tree.begin()};
while (node != end()) {
const VAddr end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
if (node->GetAddress() <= addr && end_addr - 1 >= addr) {
return node;
}
node = std::next(node);
}
return end();
}
VAddr MemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
std::size_t num_pages, std::size_t align, std::size_t offset,
std::size_t guard_pages) {
if (num_pages == 0) {
return {};
}
const VAddr region_end{region_start + region_num_pages * PageSize};
const VAddr region_last{region_end - 1};
for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
const auto info{it->GetMemoryInfo()};
if (region_last < info.GetAddress()) {
break;
}
if (info.state != MemoryState::Free) {
continue;
}
VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
area += guard_pages * PageSize;
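// Pick the smallest address of the form N * align + offset that is not below the guarded candidate.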
const VAddr offset_area{Common::AlignDown(area, align) + offset};
area = (area <= offset_area) ? offset_area : offset_area + align;
const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
const VAddr area_last{area_end - 1};
if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
area_last <= info.GetLastAddress()) {
return area;
}
}
return {};
}
void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
MemoryPermission prev_perm, MemoryAttribute prev_attribute,
MemoryState state, MemoryPermission perm,
MemoryAttribute attribute) {
const VAddr end_addr{addr + num_pages * PageSize};
iterator node{memory_block_tree.begin()};
prev_attribute |= MemoryAttribute::IpcAndDeviceMapped;
while (node != memory_block_tree.end()) {
MemoryBlock* block{&(*node)};
iterator next_node{std::next(node)};
const VAddr cur_addr{block->GetAddress()};
const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
if (addr < cur_end_addr && cur_addr < end_addr) {
if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
node = next_node;
continue;
}
iterator new_node{node};
if (addr > cur_addr) {
memory_block_tree.insert(node, block->Split(addr));
}
if (end_addr < cur_end_addr) {
new_node = memory_block_tree.insert(node, block->Split(end_addr));
}
new_node->Update(state, perm, attribute);
MergeAdjacent(new_node, next_node);
}
if (cur_end_addr - 1 >= end_addr - 1) {
break;
}
node = next_node;
}
}
void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState state,
MemoryPermission perm, MemoryAttribute attribute) {
const VAddr end_addr{addr + num_pages * PageSize};
iterator node{memory_block_tree.begin()};
while (node != memory_block_tree.end()) {
MemoryBlock* block{&(*node)};
iterator next_node{std::next(node)};
const VAddr cur_addr{block->GetAddress()};
const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
if (addr < cur_end_addr && cur_addr < end_addr) {
iterator new_node{node};
if (addr > cur_addr) {
memory_block_tree.insert(node, block->Split(addr));
}
if (end_addr < cur_end_addr) {
new_node = memory_block_tree.insert(node, block->Split(end_addr));
}
new_node->Update(state, perm, attribute);
MergeAdjacent(new_node, next_node);
}
if (cur_end_addr - 1 >= end_addr - 1) {
break;
}
node = next_node;
}
}
void MemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
MemoryPermission perm) {
const VAddr end_addr{addr + num_pages * PageSize};
iterator node{memory_block_tree.begin()};
while (node != memory_block_tree.end()) {
MemoryBlock* block{&(*node)};
iterator next_node{std::next(node)};
const VAddr cur_addr{block->GetAddress()};
const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
if (addr < cur_end_addr && cur_addr < end_addr) {
iterator new_node{node};
if (addr > cur_addr) {
memory_block_tree.insert(node, block->Split(addr));
}
if (end_addr < cur_end_addr) {
new_node = memory_block_tree.insert(node, block->Split(end_addr));
}
lock_func(new_node, perm);
MergeAdjacent(new_node, next_node);
}
if (cur_end_addr - 1 >= end_addr - 1) {
break;
}
node = next_node;
}
}
void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
const_iterator it{FindIterator(start)};
MemoryInfo info{};
do {
info = it->GetMemoryInfo();
func(info);
it = std::next(it);
} while (info.addr + info.size - 1 < end - 1 && it != cend());
}
void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
MemoryBlock* block{&(*it)};
auto EraseIt = [&](const iterator it_to_erase) {
if (next_it == it_to_erase) {
next_it = std::next(next_it);
}
memory_block_tree.erase(it_to_erase);
};
if (it != memory_block_tree.begin()) {
MemoryBlock* prev{&(*std::prev(it))};
if (block->HasSameProperties(*prev)) {
const iterator prev_it{std::prev(it)};
prev->Add(block->GetNumPages());
EraseIt(it);
it = prev_it;
block = prev;
}
}
if (std::next(it) != cend()) {
const MemoryBlock* const next{&(*std::next(it))};
if (block->HasSameProperties(*next)) {
block->Add(next->GetNumPages());
EraseIt(std::next(it));
}
}
}
} // namespace Kernel::Memory

View File

@@ -0,0 +1,66 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <functional>
#include <list>
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_block.h"
namespace Kernel::Memory {
class MemoryBlockManager final {
public:
using MemoryBlockTree = std::list<MemoryBlock>;
using iterator = MemoryBlockTree::iterator;
using const_iterator = MemoryBlockTree::const_iterator;
public:
MemoryBlockManager(VAddr start_addr, VAddr end_addr);
iterator end() {
return memory_block_tree.end();
}
const_iterator end() const {
return memory_block_tree.end();
}
const_iterator cend() const {
return memory_block_tree.cend();
}
iterator FindIterator(VAddr addr);
VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
std::size_t align, std::size_t offset, std::size_t guard_pages);
void Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
MemoryPermission prev_perm, MemoryAttribute prev_attribute, MemoryState state,
MemoryPermission perm, MemoryAttribute attribute);
void Update(VAddr addr, std::size_t num_pages, MemoryState state,
MemoryPermission perm = MemoryPermission::None,
MemoryAttribute attribute = MemoryAttribute::None);
using LockFunc = std::function<void(iterator, MemoryPermission)>;
void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, MemoryPermission perm);
using IterateFunc = std::function<void(const MemoryInfo&)>;
void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);
MemoryBlock& FindBlock(VAddr addr) {
return *FindIterator(addr);
}
private:
void MergeAdjacent(iterator it, iterator& next_it);
[[maybe_unused]] const VAddr start_addr;
[[maybe_unused]] const VAddr end_addr;
MemoryBlockTree memory_block_tree;
};
} // namespace Kernel::Memory

View File

@@ -0,0 +1,71 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_types.h"
namespace Kernel::Memory {
class MemoryRegion final {
friend class MemoryLayout;
public:
constexpr PAddr StartAddress() const {
return start_address;
}
constexpr PAddr EndAddress() const {
return end_address;
}
private:
constexpr MemoryRegion() = default;
constexpr MemoryRegion(PAddr start_address, PAddr end_address)
: start_address{start_address}, end_address{end_address} {}
const PAddr start_address{};
const PAddr end_address{};
};
class MemoryLayout final {
public:
constexpr const MemoryRegion& Application() const {
return application;
}
constexpr const MemoryRegion& Applet() const {
return applet;
}
constexpr const MemoryRegion& System() const {
return system;
}
static constexpr MemoryLayout GetDefaultLayout() {
constexpr std::size_t application_size{0xcd500000};
constexpr std::size_t applet_size{0x1fb00000};
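// 0xcd500000 is roughly 3285 MiB for the application pool; 0x1fb00000 is roughly 507 MiB for applets.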
constexpr PAddr application_start_address{Core::DramMemoryMap::End - application_size};
constexpr PAddr application_end_address{Core::DramMemoryMap::End};
constexpr PAddr applet_start_address{application_start_address - applet_size};
constexpr PAddr applet_end_address{applet_start_address + applet_size};
constexpr PAddr system_start_address{Core::DramMemoryMap::SlabHeapEnd};
constexpr PAddr system_end_address{applet_start_address};
return {application_start_address, application_end_address, applet_start_address,
applet_end_address, system_start_address, system_end_address};
}
private:
constexpr MemoryLayout(PAddr application_start_address, PAddr application_end_address,
PAddr applet_start_address, PAddr applet_end_address,
PAddr system_start_address, PAddr system_end_address)
: application{application_start_address, application_end_address},
applet{applet_start_address, applet_end_address}, system{system_start_address, system_end_address} {}
const MemoryRegion application;
const MemoryRegion applet;
const MemoryRegion system;
};
} // namespace Kernel::Memory

View File

@@ -0,0 +1,175 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/kernel/memory/page_linked_list.h"
namespace Kernel::Memory {
std::size_t MemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
const auto size{end_address - start_address};
// Calculate metadata sizes
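// One u16 reference count per page, plus an optimize map of one bit per page packed into u64 words.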
const auto ref_count_size{(size / PageSize) * sizeof(u16)};
const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)};
const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)};
const auto page_heap_size{PageHeap::CalculateMetadataOverheadSize(size)};
const auto total_metadata_size{manager_size + page_heap_size};
ASSERT(manager_size <= total_metadata_size);
ASSERT(Common::IsAligned(total_metadata_size, PageSize));
// Setup region
pool = new_pool;
// Initialize the manager's KPageHeap
heap.Initialize(start_address, size, page_heap_size);
// Free the memory to the heap
heap.Free(start_address, size / PageSize);
// Update the heap's used size
heap.UpdateUsedSize();
return total_metadata_size;
}
void MemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
ASSERT(pool < Pool::Count);
managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address);
}
VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
Direction dir) {
// Early return if we're allocating no pages
if (num_pages == 0) {
return {};
}
// Lock the pool that we're allocating from
const auto pool_index{static_cast<std::size_t>(pool)};
std::lock_guard lock{pool_locks[pool_index]};
// Choose a heap based on our page size request
const s32 heap_index{PageHeap::GetAlignedBlockIndex(num_pages, align_pages)};
// Loop, trying to iterate from each block
// TODO (bunnei): Support multiple managers
Impl& chosen_manager{managers[pool_index]};
VAddr allocated_block{chosen_manager.AllocateBlock(heap_index)};
// If we failed to allocate, quit now
if (!allocated_block) {
return {};
}
// If we allocated more than we need, free some
const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
if (allocated_pages > num_pages) {
chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
}
return allocated_block;
}
ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
Direction dir) {
ASSERT(page_list.GetNumPages() == 0);
// Early return if we're allocating no pages
if (num_pages == 0) {
return RESULT_SUCCESS;
}
// Lock the pool that we're allocating from
const auto pool_index{static_cast<std::size_t>(pool)};
std::lock_guard lock{pool_locks[pool_index]};
// Choose a heap based on our page size request
const s32 heap_index{PageHeap::GetBlockIndex(num_pages)};
if (heap_index < 0) {
return ERR_OUT_OF_MEMORY;
}
// TODO (bunnei): Support multiple managers
Impl& chosen_manager{managers[pool_index]};
// Ensure that we don't leave anything un-freed
auto group_guard = detail::ScopeExit([&] {
for (const auto& it : page_list.Nodes()) {
const auto min_num_pages{std::min<size_t>(
it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
chosen_manager.Free(it.GetAddress(), min_num_pages);
}
});
// Keep allocating until we've allocated all our pages
for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)};
while (num_pages >= pages_per_alloc) {
// Allocate a block
VAddr allocated_block{chosen_manager.AllocateBlock(index)};
if (!allocated_block) {
break;
}
// Safely add it to our group
{
auto block_guard = detail::ScopeExit(
[&] { chosen_manager.Free(allocated_block, pages_per_alloc); });
if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)};
result.IsError()) {
return result;
}
block_guard.Cancel();
}
num_pages -= pages_per_alloc;
}
}
// Only succeed if we allocated as many pages as we wanted
if (num_pages) {
return ERR_OUT_OF_MEMORY;
}
// We succeeded!
group_guard.Cancel();
return RESULT_SUCCESS;
}
ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
Direction dir) {
// Early return if we're freeing no pages
if (!num_pages) {
return RESULT_SUCCESS;
}
// Lock the pool that we're freeing from
const auto pool_index{static_cast<std::size_t>(pool)};
std::lock_guard lock{pool_locks[pool_index]};
// TODO (bunnei): Support multiple managers
Impl& chosen_manager{managers[pool_index]};
// Free all of the pages
for (const auto& it : page_list.Nodes()) {
const auto min_num_pages{std::min<size_t>(
it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
chosen_manager.Free(it.GetAddress(), min_num_pages);
}
return RESULT_SUCCESS;
}
} // namespace Kernel::Memory

View File

@@ -0,0 +1,96 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <mutex>
#include "common/common_types.h"
#include "core/hle/kernel/memory/page_heap.h"
#include "core/hle/result.h"
namespace Kernel::Memory {
class PageLinkedList;
class MemoryManager final : NonCopyable {
public:
enum class Pool : u32 {
Application = 0,
Applet = 1,
System = 2,
SystemNonSecure = 3,
Count,
Shift = 4,
Mask = (0xF << Shift),
};
enum class Direction : u32 {
FromFront = 0,
FromBack = 1,
Shift = 0,
Mask = (0xF << Shift),
};
MemoryManager() = default;
constexpr std::size_t GetSize(Pool pool) const {
return managers[static_cast<std::size_t>(pool)].GetSize();
}
void InitializeManager(Pool pool, u64 start_address, u64 end_address);
VAddr AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
Direction dir = Direction::FromFront);
ResultCode Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
Direction dir = Direction::FromFront);
ResultCode Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
Direction dir = Direction::FromFront);
static constexpr std::size_t MaxManagerCount = 10;
private:
class Impl final : NonCopyable {
private:
using RefCount = u16;
private:
PageHeap heap;
Pool pool{};
public:
Impl() = default;
std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address);
VAddr AllocateBlock(s32 index) {
return heap.AllocateBlock(index);
}
void Free(VAddr addr, std::size_t num_pages) {
heap.Free(addr, num_pages);
}
constexpr std::size_t GetSize() const {
return heap.GetSize();
}
constexpr VAddr GetAddress() const {
return heap.GetAddress();
}
constexpr VAddr GetEndAddress() const {
return heap.GetEndAddress();
}
};
private:
std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks;
std::array<Impl, MaxManagerCount> managers;
};
} // namespace Kernel::Memory

View File

@@ -0,0 +1,18 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include "common/common_types.h"
namespace Kernel::Memory {
constexpr std::size_t PageBits{12};
constexpr std::size_t PageSize{1 << PageBits};
using Page = std::array<u8, PageSize>;
} // namespace Kernel::Memory

View File

@@ -0,0 +1,119 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
#include "core/core.h"
#include "core/hle/kernel/memory/page_heap.h"
#include "core/memory.h"
namespace Kernel::Memory {
void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
// Check our assumptions
ASSERT(Common::IsAligned((address), PageSize));
ASSERT(Common::IsAligned(size, PageSize));
// Set our members
heap_address = address;
heap_size = size;
// Setup bitmaps
metadata.resize(metadata_size / sizeof(u64));
u64* cur_bitmap_storage{metadata.data()};
for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
const std::size_t next_block_shift{
(i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift,
next_block_shift, cur_bitmap_storage);
}
}
VAddr PageHeap::AllocateBlock(s32 index) {
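// Search this block size and every larger one; if the block found is larger than requested,
// the unused tail is freed back to the heap below.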
const std::size_t needed_size{blocks[index].GetSize()};
for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
if (const VAddr addr{blocks[i].PopBlock()}; addr) {
if (const std::size_t allocated_size{blocks[i].GetSize()};
allocated_size > needed_size) {
Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
}
return addr;
}
}
return 0;
}
void PageHeap::FreeBlock(VAddr block, s32 index) {
do {
block = blocks[index++].PushBlock(block);
} while (block != 0);
}
void PageHeap::Free(VAddr addr, std::size_t num_pages) {
// Freeing no pages is a no-op
if (num_pages == 0) {
return;
}
// Find the largest block size that we can free, and free as many as possible
s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1};
const VAddr start{addr};
const VAddr end{(num_pages * PageSize) + addr};
VAddr before_start{start};
VAddr before_end{start};
VAddr after_start{end};
VAddr after_end{end};
while (big_index >= 0) {
const std::size_t block_size{blocks[big_index].GetSize()};
const VAddr big_start{Common::AlignUp((start), block_size)};
const VAddr big_end{Common::AlignDown((end), block_size)};
if (big_start < big_end) {
// Free as many big blocks as we can
for (auto block{big_start}; block < big_end; block += block_size) {
FreeBlock(block, big_index);
}
before_end = big_start;
after_start = big_end;
break;
}
big_index--;
}
ASSERT(big_index >= 0);
// Free space before the big blocks
for (s32 i{big_index - 1}; i >= 0; i--) {
const std::size_t block_size{blocks[i].GetSize()};
while (before_start + block_size <= before_end) {
before_end -= block_size;
FreeBlock(before_end, i);
}
}
// Free space after the big blocks
for (s32 i{big_index - 1}; i >= 0; i--) {
const std::size_t block_size{blocks[i].GetSize()};
while (after_start + block_size <= after_end) {
FreeBlock(after_start, i);
after_start += block_size;
}
}
}
std::size_t PageHeap::CalculateMetadataOverheadSize(std::size_t region_size) {
std::size_t overhead_size = 0;
for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
const std::size_t next_block_shift{
(i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
overhead_size += PageHeap::Block::CalculateMetadataOverheadSize(
region_size, cur_block_shift, next_block_shift);
}
return Common::AlignUp(overhead_size, PageSize);
}
} // namespace Kernel::Memory

View File

@@ -0,0 +1,370 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
#pragma once
#include <array>
#include <vector>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"
namespace Kernel::Memory {
class PageHeap final : NonCopyable {
public:
static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
const auto target_pages{std::max(num_pages, align_pages)};
for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
if (target_pages <=
(static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
return static_cast<s32>(i);
}
}
return -1;
}
static constexpr s32 GetBlockIndex(std::size_t num_pages) {
for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
return i;
}
}
return -1;
}
static constexpr std::size_t GetBlockSize(std::size_t index) {
return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
}
static constexpr std::size_t GetBlockNumPages(std::size_t index) {
return GetBlockSize(index) / PageSize;
}
private:
static constexpr std::size_t NumMemoryBlockPageShifts{7};
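// Block sizes corresponding to the shifts below: 4 KiB, 64 KiB, 2 MiB, 4 MiB, 32 MiB, 512 MiB and 1 GiB.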
static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
};
class Block final : NonCopyable {
private:
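// Hierarchical bitmap: each level keeps one summary bit per 64 bits of the level below it, so
// FindFreeBlock can descend from the root to a free block bit in at most MaxDepth steps.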
class Bitmap final : NonCopyable {
public:
static constexpr std::size_t MaxDepth{4};
private:
std::array<u64*, MaxDepth> bit_storages{};
std::size_t num_bits{};
std::size_t used_depths{};
public:
constexpr Bitmap() = default;
constexpr std::size_t GetNumBits() const {
return num_bits;
}
constexpr s32 GetHighestDepthIndex() const {
return static_cast<s32>(used_depths) - 1;
}
constexpr u64* Initialize(u64* storage, std::size_t size) {
// Initially, everything is un-set
num_bits = 0;
// Calculate the needed bitmap depth
used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
ASSERT(used_depths <= MaxDepth);
// Set the bitmap pointers
for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
bit_storages[depth] = storage;
size = Common::AlignUp(size, 64) / 64;
storage += size;
}
return storage;
}
s64 FindFreeBlock() const {
uintptr_t offset{};
s32 depth{};
do {
const u64 v{bit_storages[depth][offset]};
if (v == 0) {
// Non-zero depth indicates that a previous level had a free block
ASSERT(depth == 0);
return -1;
}
offset = offset * 64 + Common::CountTrailingZeroes64(v);
++depth;
} while (depth < static_cast<s32>(used_depths));
return static_cast<s64>(offset);
}
constexpr void SetBit(std::size_t offset) {
SetBit(GetHighestDepthIndex(), offset);
num_bits++;
}
constexpr void ClearBit(std::size_t offset) {
ClearBit(GetHighestDepthIndex(), offset);
num_bits--;
}
constexpr bool ClearRange(std::size_t offset, std::size_t count) {
const s32 depth{GetHighestDepthIndex()};
const auto bit_ind{offset / 64};
u64* bits{bit_storages[depth]};
if (count < 64) {
const auto shift{offset % 64};
ASSERT(shift + count <= 64);
// Check that all the bits are set
const u64 mask{((1ULL << count) - 1) << shift};
u64 v{bits[bit_ind]};
if ((v & mask) != mask) {
return false;
}
// Clear the bits
v &= ~mask;
bits[bit_ind] = v;
if (v == 0) {
ClearBit(depth - 1, bit_ind);
}
} else {
ASSERT(offset % 64 == 0);
ASSERT(count % 64 == 0);
// Check that all the bits are set
std::size_t remaining{count};
std::size_t i = 0;
do {
if (bits[bit_ind + i++] != ~u64(0)) {
return false;
}
remaining -= 64;
} while (remaining > 0);
// Clear the bits
remaining = count;
i = 0;
do {
bits[bit_ind + i] = 0;
ClearBit(depth - 1, bit_ind + i);
i++;
remaining -= 64;
} while (remaining > 0);
}
num_bits -= count;
return true;
}
private:
constexpr void SetBit(s32 depth, std::size_t offset) {
while (depth >= 0) {
const auto ind{offset / 64};
const auto which{offset % 64};
const u64 mask{1ULL << which};
u64* bit{std::addressof(bit_storages[depth][ind])};
const u64 v{*bit};
ASSERT((v & mask) == 0);
*bit = v | mask;
if (v) {
break;
}
offset = ind;
depth--;
}
}
constexpr void ClearBit(s32 depth, std::size_t offset) {
while (depth >= 0) {
const auto ind{offset / 64};
const auto which{offset % 64};
const u64 mask{1ULL << which};
u64* bit{std::addressof(bit_storages[depth][ind])};
u64 v{*bit};
ASSERT((v & mask) != 0);
v &= ~mask;
*bit = v;
if (v) {
break;
}
offset = ind;
depth--;
}
}
private:
static constexpr s32 GetRequiredDepth(std::size_t region_size) {
s32 depth = 0;
while (true) {
region_size /= 64;
depth++;
if (region_size == 0) {
return depth;
}
}
}
public:
static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size) {
std::size_t overhead_bits = 0;
for (s32 depth{GetRequiredDepth(region_size) - 1}; depth >= 0; depth--) {
region_size = Common::AlignUp(region_size, 64) / 64;
overhead_bits += region_size;
}
return overhead_bits * sizeof(u64);
}
};
private:
Bitmap bitmap;
VAddr heap_address{};
uintptr_t end_offset{};
std::size_t block_shift{};
std::size_t next_block_shift{};
public:
constexpr Block() = default;
constexpr std::size_t GetShift() const {
return block_shift;
}
constexpr std::size_t GetNextShift() const {
return next_block_shift;
}
constexpr std::size_t GetSize() const {
return static_cast<std::size_t>(1) << GetShift();
}
constexpr std::size_t GetNumPages() const {
return GetSize() / PageSize;
}
constexpr std::size_t GetNumFreeBlocks() const {
return bitmap.GetNumBits();
}
constexpr std::size_t GetNumFreePages() const {
return GetNumFreeBlocks() * GetNumPages();
}
constexpr u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
u64* bit_storage) {
// Set shifts
block_shift = bs;
next_block_shift = nbs;
// Align up the address
VAddr end{addr + size};
const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
: (1ULL << block_shift)};
addr = Common::AlignDown((addr), align);
end = Common::AlignUp((end), align);
heap_address = addr;
end_offset = (end - addr) / (1ULL << block_shift);
return bitmap.Initialize(bit_storage, end_offset);
}
constexpr VAddr PushBlock(VAddr address) {
// Set the bit for the free block
std::size_t offset{(address - heap_address) >> GetShift()};
bitmap.SetBit(offset);
// If we have a next shift, try to clear the blocks below and return the address
if (GetNextShift()) {
const auto diff{1ULL << (GetNextShift() - GetShift())};
offset = Common::AlignDown(offset, diff);
if (bitmap.ClearRange(offset, diff)) {
return heap_address + (offset << GetShift());
}
}
// We couldn't coalesce, or we're already as big as possible
return 0;
}
VAddr PopBlock() {
// Find a free block
const s64 soffset{bitmap.FindFreeBlock()};
if (soffset < 0) {
return 0;
}
const auto offset{static_cast<std::size_t>(soffset)};
// Update our tracking and return it
bitmap.ClearBit(offset);
return heap_address + (offset << GetShift());
}
public:
static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size,
std::size_t cur_block_shift,
std::size_t next_block_shift) {
const auto cur_block_size{(1ULL << cur_block_shift)};
const auto next_block_size{(1ULL << next_block_shift)};
const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
return Bitmap::CalculateMetadataOverheadSize(
(align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
}
};
public:
PageHeap() = default;
constexpr VAddr GetAddress() const {
return heap_address;
}
constexpr std::size_t GetSize() const {
return heap_size;
}
constexpr VAddr GetEndAddress() const {
return GetAddress() + GetSize();
}
constexpr std::size_t GetPageOffset(VAddr block) const {
return (block - GetAddress()) / PageSize;
}
void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
VAddr AllocateBlock(s32 index);
void Free(VAddr addr, std::size_t num_pages);
void UpdateUsedSize() {
used_size = heap_size - (GetNumFreePages() * PageSize);
}
static std::size_t CalculateMetadataOverheadSize(std::size_t region_size);
private:
constexpr std::size_t GetNumFreePages() const {
std::size_t num_free{};
for (const auto& block : blocks) {
num_free += block.GetNumFreePages();
}
return num_free;
}
void FreeBlock(VAddr block, s32 index);
VAddr heap_address{};
std::size_t heap_size{};
std::size_t used_size{};
std::array<Block, NumMemoryBlockPageShifts> blocks{};
std::vector<u64> metadata;
};
} // namespace Kernel::Memory

View File

@@ -0,0 +1,92 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <list>
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/result.h"
namespace Kernel::Memory {
class PageLinkedList final {
public:
class Node final {
public:
constexpr Node(u64 addr, std::size_t num_pages) : addr{addr}, num_pages{num_pages} {}
constexpr u64 GetAddress() const {
return addr;
}
constexpr std::size_t GetNumPages() const {
return num_pages;
}
private:
u64 addr{};
std::size_t num_pages{};
};
public:
PageLinkedList() = default;
PageLinkedList(u64 address, u64 num_pages) {
ASSERT(AddBlock(address, num_pages).IsSuccess());
}
constexpr std::list<Node>& Nodes() {
return nodes;
}
constexpr const std::list<Node>& Nodes() const {
return nodes;
}
std::size_t GetNumPages() const {
std::size_t num_pages = 0;
for (const Node& node : nodes) {
num_pages += node.GetNumPages();
}
return num_pages;
}
bool IsEqual(PageLinkedList& other) const {
auto this_node = nodes.begin();
auto other_node = other.nodes.begin();
while (this_node != nodes.end() && other_node != other.nodes.end()) {
if (this_node->GetAddress() != other_node->GetAddress() ||
this_node->GetNumPages() != other_node->GetNumPages()) {
return false;
}
this_node = std::next(this_node);
other_node = std::next(other_node);
}
return this_node == nodes.end() && other_node == other.nodes.end();
}
ResultCode AddBlock(u64 address, u64 num_pages) {
if (!num_pages) {
return RESULT_SUCCESS;
}
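// If the new range directly follows the last node, extend that node instead of appending.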
if (!nodes.empty()) {
const auto node = nodes.back();
if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
address = node.GetAddress();
num_pages += node.GetNumPages();
nodes.pop_back();
}
}
nodes.push_back({address, num_pages});
return RESULT_SUCCESS;
}
private:
std::list<Node> nodes;
};
} // namespace Kernel::Memory

File diff suppressed because it is too large

View File

@@ -0,0 +1,277 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <mutex>
#include "common/common_types.h"
#include "common/page_table.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/result.h"
namespace Core {
class System;
}
namespace Kernel::Memory {
class MemoryBlockManager;
class PageTable final : NonCopyable {
public:
explicit PageTable(Core::System& system);
ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
VAddr code_addr, std::size_t code_size,
Memory::MemoryManager::Pool pool);
ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, MemoryState state,
MemoryPermission perm);
ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
ResultCode UnmapMemory(VAddr addr, std::size_t size);
ResultCode Map(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state,
MemoryPermission perm);
ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, MemoryPermission perm);
MemoryInfo QueryInfo(VAddr addr);
ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm);
ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask,
MemoryAttribute value);
ResultCode SetHeapCapacity(std::size_t new_heap_capacity);
ResultVal<VAddr> SetHeapSize(std::size_t size);
ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
bool is_map_only, VAddr region_start,
std::size_t region_num_pages, MemoryState state,
MemoryPermission perm, PAddr map_addr = 0);
ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
Common::PageTable& PageTableImpl() {
return page_table_impl;
}
const Common::PageTable& PageTableImpl() const {
return page_table_impl;
}
private:
enum class OperationType : u32 {
Map,
MapGroup,
Unmap,
ChangePermissions,
ChangePermissionsAndRefresh,
};
static constexpr MemoryAttribute DefaultMemoryIgnoreAttr =
MemoryAttribute::DontCareMask | MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared;
ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
ResultCode MapPages(VAddr addr, const PageLinkedList& page_linked_list, MemoryPermission perm);
void MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start, VAddr end);
bool IsRegionMapped(VAddr address, u64 size);
bool IsRegionContiguous(VAddr addr, u64 size) const;
void AddRegionToPages(VAddr start, std::size_t num_pages, PageLinkedList& page_linked_list);
MemoryInfo QueryInfoImpl(VAddr addr);
VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
std::size_t align);
ResultCode Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group,
OperationType operation);
ResultCode Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm,
OperationType operation, PAddr map_addr = 0);
constexpr VAddr GetRegionAddress(MemoryState state) const;
constexpr std::size_t GetRegionSize(MemoryState state) const;
constexpr bool CanContain(VAddr addr, std::size_t size, MemoryState state) const;
constexpr ResultCode CheckMemoryState(const MemoryInfo& info, MemoryState state_mask,
MemoryState state, MemoryPermission perm_mask,
MemoryPermission perm, MemoryAttribute attr_mask,
MemoryAttribute attr) const;
ResultCode CheckMemoryState(MemoryState* out_state, MemoryPermission* out_perm,
MemoryAttribute* out_attr, VAddr addr, std::size_t size,
MemoryState state_mask, MemoryState state,
MemoryPermission perm_mask, MemoryPermission perm,
MemoryAttribute attr_mask, MemoryAttribute attr,
MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr);
ResultCode CheckMemoryState(VAddr addr, std::size_t size, MemoryState state_mask,
MemoryState state, MemoryPermission perm_mask,
MemoryPermission perm, MemoryAttribute attr_mask,
MemoryAttribute attr,
MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) {
return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask,
perm, attr_mask, attr, ignore_attr);
}
std::recursive_mutex page_table_lock;
std::unique_ptr<MemoryBlockManager> block_manager;
public:
constexpr VAddr GetAddressSpaceStart() const {
return address_space_start;
}
constexpr VAddr GetAddressSpaceEnd() const {
return address_space_end;
}
constexpr std::size_t GetAddressSpaceSize() const {
return address_space_end - address_space_start;
}
constexpr VAddr GetHeapRegionStart() const {
return heap_region_start;
}
constexpr VAddr GetHeapRegionEnd() const {
return heap_region_end;
}
constexpr std::size_t GetHeapRegionSize() const {
return heap_region_end - heap_region_start;
}
constexpr VAddr GetAliasRegionStart() const {
return alias_region_start;
}
constexpr VAddr GetAliasRegionEnd() const {
return alias_region_end;
}
constexpr std::size_t GetAliasRegionSize() const {
return alias_region_end - alias_region_start;
}
constexpr VAddr GetStackRegionStart() const {
return stack_region_start;
}
constexpr VAddr GetStackRegionEnd() const {
return stack_region_end;
}
constexpr std::size_t GetStackRegionSize() const {
return stack_region_end - stack_region_start;
}
constexpr VAddr GetKernelMapRegionStart() const {
return kernel_map_region_start;
}
constexpr VAddr GetKernelMapRegionEnd() const {
return kernel_map_region_end;
}
constexpr VAddr GetCodeRegionStart() const {
return code_region_start;
}
constexpr VAddr GetCodeRegionEnd() const {
return code_region_end;
}
constexpr VAddr GetAliasCodeRegionStart() const {
return alias_code_region_start;
}
constexpr VAddr GetAliasCodeRegionSize() const {
return alias_code_region_end - alias_code_region_start;
}
constexpr std::size_t GetAddressSpaceWidth() const {
return address_space_width;
}
constexpr std::size_t GetHeapSize() {
return current_heap_addr - heap_region_start;
}
constexpr std::size_t GetTotalHeapSize() {
return GetHeapSize() + physical_memory_usage;
}
constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
return address_space_start <= address && address + size - 1 <= address_space_end - 1;
}
constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const {
return alias_region_start > address || address + size - 1 > alias_region_end - 1;
}
constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const {
return stack_region_start > address || address + size - 1 > stack_region_end - 1;
}
constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const {
return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
}
constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const {
return address + size > heap_region_start && heap_region_end > address;
}
constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const {
return address + size > alias_region_start && alias_region_end > address;
}
constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const {
if (IsInvalidRegion(address, size)) {
return true;
}
if (IsInsideHeapRegion(address, size)) {
return true;
}
if (IsInsideAliasRegion(address, size)) {
return true;
}
return false;
}
constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
return !IsOutsideASLRRegion(address, size);
}
constexpr PAddr GetPhysicalAddr(VAddr addr) {
return page_table_impl.backing_addr[addr >> Memory::PageBits] + addr;
}
private:
constexpr bool Contains(VAddr addr) const {
return address_space_start <= addr && addr <= address_space_end - 1;
}
constexpr bool Contains(VAddr addr, std::size_t size) const {
return address_space_start <= addr && addr < addr + size &&
addr + size - 1 <= address_space_end - 1;
}
constexpr bool IsKernel() const {
return is_kernel;
}
constexpr bool IsAslrEnabled() const {
return is_aslr_enabled;
}
constexpr std::size_t GetNumGuardPages() const {
return IsKernel() ? 1 : 4;
}
constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
return (address_space_start <= addr) &&
(num_pages <= (address_space_end - address_space_start) / PageSize) &&
(addr + num_pages * PageSize - 1 <= address_space_end - 1);
}
private:
VAddr address_space_start{};
VAddr address_space_end{};
VAddr heap_region_start{};
VAddr heap_region_end{};
VAddr current_heap_end{};
VAddr alias_region_start{};
VAddr alias_region_end{};
VAddr stack_region_start{};
VAddr stack_region_end{};
VAddr kernel_map_region_start{};
VAddr kernel_map_region_end{};
VAddr code_region_start{};
VAddr code_region_end{};
VAddr alias_code_region_start{};
VAddr alias_code_region_end{};
VAddr current_heap_addr{};
std::size_t heap_capacity{};
std::size_t physical_memory_usage{};
std::size_t max_heap_size{};
std::size_t max_physical_memory_size{};
std::size_t address_space_width{};
bool is_kernel{};
bool is_aslr_enabled{};
MemoryManager::Pool memory_pool{MemoryManager::Pool::Application};
Common::PageTable page_table_impl;
Core::System& system;
};
} // namespace Kernel::Memory

View File

@@ -0,0 +1,163 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
#pragma once
#include <atomic>
#include "common/assert.h"
#include "common/common_types.h"
namespace Kernel::Memory {
namespace impl {
class SlabHeapImpl final : NonCopyable {
public:
struct Node {
Node* next{};
};
constexpr SlabHeapImpl() = default;
void Initialize(std::size_t size) {
ASSERT(head == nullptr);
obj_size = size;
}
constexpr std::size_t GetObjectSize() const {
return obj_size;
}
Node* GetHead() const {
return head;
}
void* Allocate() {
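// Lock-free pop: atomically swing head to head->next, retrying if another thread raced in between.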
Node* ret = head.load();
do {
if (ret == nullptr) {
break;
}
} while (!head.compare_exchange_weak(ret, ret->next));
return ret;
}
void Free(void* obj) {
Node* node = static_cast<Node*>(obj);
Node* cur_head = head.load();
do {
node->next = cur_head;
} while (!head.compare_exchange_weak(cur_head, node));
}
private:
std::atomic<Node*> head{};
std::size_t obj_size{};
};
} // namespace impl
class SlabHeapBase : NonCopyable {
public:
constexpr SlabHeapBase() = default;
constexpr bool Contains(uintptr_t addr) const {
return start <= addr && addr < end;
}
constexpr std::size_t GetSlabHeapSize() const {
return (end - start) / GetObjectSize();
}
constexpr std::size_t GetObjectSize() const {
return impl.GetObjectSize();
}
constexpr uintptr_t GetSlabHeapAddress() const {
return start;
}
std::size_t GetObjectIndexImpl(const void* obj) const {
return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
}
std::size_t GetPeakIndex() const {
return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
}
void* AllocateImpl() {
return impl.Allocate();
}
void FreeImpl(void* obj) {
// Don't allow freeing an object that wasn't allocated from this heap
ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
impl.Free(obj);
}
void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
// Ensure we don't initialize a slab using null memory
ASSERT(memory != nullptr);
// Initialize the base allocator
impl.Initialize(obj_size);
// Set our tracking variables
const std::size_t num_obj = (memory_size / obj_size);
start = reinterpret_cast<uintptr_t>(memory);
end = start + num_obj * obj_size;
peak = start;
// Free the objects
u8* cur = reinterpret_cast<u8*>(end);
for (std::size_t i{}; i < num_obj; i++) {
cur -= obj_size;
impl.Free(cur);
}
}
private:
using Impl = impl::SlabHeapImpl;
Impl impl;
uintptr_t peak{};
uintptr_t start{};
uintptr_t end{};
};
template <typename T>
class SlabHeap final : public SlabHeapBase {
public:
constexpr SlabHeap() : SlabHeapBase() {}
void Initialize(void* memory, std::size_t memory_size) {
InitializeImpl(sizeof(T), memory, memory_size);
}
T* Allocate() {
T* obj = static_cast<T*>(AllocateImpl());
if (obj != nullptr) {
new (obj) T();
}
return obj;
}
void Free(T* obj) {
FreeImpl(obj);
}
constexpr std::size_t GetObjectIndex(const T* obj) const {
return GetObjectIndexImpl(obj);
}
};
} // namespace Kernel::Memory

View File

@@ -0,0 +1,40 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <limits>
#include <random>
#include "core/hle/kernel/memory/system_control.h"
namespace Kernel::Memory::SystemControl {
namespace {
template <typename F>
u64 GenerateUniformRange(u64 min, u64 max, F f) {
// Handle the case where the difference is too large to represent.
if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
return f();
}
// Iterate until we get a value in range.
const u64 range_size = ((max + 1) - min);
const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
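// effective_max is the largest multiple of range_size representable in a u64; rejecting values at or
// above it keeps every residue class equally likely (no modulo bias).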
while (true) {
if (const u64 rnd = f(); rnd < effective_max) {
return min + (rnd % range_size);
}
}
}
u64 GenerateRandomU64ForInit() {
static std::random_device device;
static std::mt19937 gen(device());
static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
return distribution(gen);
}
} // Anonymous namespace
u64 GenerateRandomRange(u64 min, u64 max) {
return GenerateUniformRange(min, max, GenerateRandomU64ForInit);
}
} // namespace Kernel::Memory::SystemControl

View File

@@ -0,0 +1,13 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_types.h"
namespace Kernel::Memory::SystemControl {
u64 GenerateRandomRange(u64 min, u64 max);
} // namespace Kernel::Memory::SystemControl