early-access version 1559
@@ -75,10 +75,14 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_
     if (incoming) {
         // Populate the object lists with the data in the IPC request.
         for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_copy; ++handle) {
-            copy_objects.push_back(handle_table.GetGeneric(rp.Pop<Handle>()));
+            const u32 copy_handle{rp.Pop<Handle>()};
+            copy_handles.push_back(copy_handle);
+            copy_objects.push_back(handle_table.GetGeneric(copy_handle));
         }
         for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_move; ++handle) {
-            move_objects.push_back(handle_table.GetGeneric(rp.Pop<Handle>()));
+            const u32 move_handle{rp.Pop<Handle>()};
+            move_handles.push_back(move_handle);
+            move_objects.push_back(handle_table.GetGeneric(move_handle));
         }
     } else {
         // For responses we just ignore the handles, they're empty and will be populated when
@@ -210,6 +210,14 @@ public:
     /// Helper function to test whether the output buffer at buffer_index can be written
     bool CanWriteBuffer(std::size_t buffer_index = 0) const;

+    Handle GetCopyHandle(std::size_t index) const {
+        return copy_handles.at(index);
+    }
+
+    Handle GetMoveHandle(std::size_t index) const {
+        return move_handles.at(index);
+    }
+
     template <typename T>
     std::shared_ptr<T> GetCopyObject(std::size_t index) {
         return DynamicObjectCast<T>(copy_objects.at(index));
@@ -285,6 +293,8 @@ private:
     std::shared_ptr<Kernel::ServerSession> server_session;
     std::shared_ptr<KThread> thread;
     // TODO(yuriks): Check common usage of this and optimize size accordingly
+    boost::container::small_vector<Handle, 8> move_handles;
+    boost::container::small_vector<Handle, 8> copy_handles;
     boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects;
     boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects;
     boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects;
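Taken together, these three hunks make HLERequestContext retain the raw IPC handle values alongside the kernel objects they resolve to, exposed through GetCopyHandle/GetMoveHandle. A minimal sketch of a call site; the service, handler name, and log line are illustrative, not part of this commit:

    // Hypothetical handler built on the new accessors.
    void ExampleService::OnRequest(Kernel::HLERequestContext& ctx) {
        // The numeric handle from copy-descriptor slot 0, exactly as the guest
        // sent it; before this change only the resolved object survived parsing.
        const Kernel::Handle client_handle = ctx.GetCopyHandle(0);
        LOG_DEBUG(Service, "guest passed handle={:08X}", client_handle);
    }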
@@ -5,45 +5,34 @@
 #include <array>

 #include "common/assert.h"
+#include "common/common_sizes.h"
 #include "core/hle/kernel/k_address_space_info.h"

 namespace Kernel {

 namespace {

-enum : u64 {
-    Size_1_MB = 0x100000,
-    Size_2_MB = 2 * Size_1_MB,
-    Size_128_MB = 128 * Size_1_MB,
-    Size_1_GB = 0x40000000,
-    Size_2_GB = 2 * Size_1_GB,
-    Size_4_GB = 4 * Size_1_GB,
-    Size_6_GB = 6 * Size_1_GB,
-    Size_64_GB = 64 * Size_1_GB,
-    Size_512_GB = 512 * Size_1_GB,
-    Invalid = std::numeric_limits<u64>::max(),
-};
-
 // clang-format off
 constexpr std::array<KAddressSpaceInfo, 13> AddressSpaceInfos{{
-   { .bit_width = 32, .address = Size_2_MB  , .size = Size_1_GB   - Size_2_MB  , .type = KAddressSpaceInfo::Type::MapSmall, },
-   { .bit_width = 32, .address = Size_1_GB  , .size = Size_4_GB   - Size_1_GB  , .type = KAddressSpaceInfo::Type::MapLarge, },
-   { .bit_width = 32, .address = Invalid    , .size = Size_1_GB                , .type = KAddressSpaceInfo::Type::Heap,     },
-   { .bit_width = 32, .address = Invalid    , .size = Size_1_GB                , .type = KAddressSpaceInfo::Type::Alias,    },
-   { .bit_width = 36, .address = Size_128_MB, .size = Size_2_GB   - Size_128_MB, .type = KAddressSpaceInfo::Type::MapSmall, },
-   { .bit_width = 36, .address = Size_2_GB  , .size = Size_64_GB  - Size_2_GB  , .type = KAddressSpaceInfo::Type::MapLarge, },
-   { .bit_width = 36, .address = Invalid    , .size = Size_6_GB                , .type = KAddressSpaceInfo::Type::Heap,     },
-   { .bit_width = 36, .address = Invalid    , .size = Size_6_GB                , .type = KAddressSpaceInfo::Type::Alias,    },
-   { .bit_width = 39, .address = Size_128_MB, .size = Size_512_GB - Size_128_MB, .type = KAddressSpaceInfo::Type::Map39Bit, },
-   { .bit_width = 39, .address = Invalid    , .size = Size_64_GB               , .type = KAddressSpaceInfo::Type::MapSmall  },
-   { .bit_width = 39, .address = Invalid    , .size = Size_6_GB                , .type = KAddressSpaceInfo::Type::Heap,     },
-   { .bit_width = 39, .address = Invalid    , .size = Size_64_GB               , .type = KAddressSpaceInfo::Type::Alias,    },
-   { .bit_width = 39, .address = Invalid    , .size = Size_2_GB                , .type = KAddressSpaceInfo::Type::Stack,    },
+   { .bit_width = 32, .address = Common::Size_2_MB   , .size = Common::Size_1_GB   - Common::Size_2_MB   , .type = KAddressSpaceInfo::Type::MapSmall, },
+   { .bit_width = 32, .address = Common::Size_1_GB   , .size = Common::Size_4_GB   - Common::Size_1_GB   , .type = KAddressSpaceInfo::Type::MapLarge, },
+   { .bit_width = 32, .address = Common::Size_Invalid, .size = Common::Size_1_GB                         , .type = KAddressSpaceInfo::Type::Alias,    },
+   { .bit_width = 32, .address = Common::Size_Invalid, .size = Common::Size_1_GB                         , .type = KAddressSpaceInfo::Type::Heap,     },
+   { .bit_width = 36, .address = Common::Size_128_MB , .size = Common::Size_2_GB   - Common::Size_128_MB , .type = KAddressSpaceInfo::Type::MapSmall, },
+   { .bit_width = 36, .address = Common::Size_2_GB   , .size = Common::Size_64_GB  - Common::Size_2_GB   , .type = KAddressSpaceInfo::Type::MapLarge, },
+   { .bit_width = 36, .address = Common::Size_Invalid, .size = Common::Size_6_GB                         , .type = KAddressSpaceInfo::Type::Heap,     },
+   { .bit_width = 36, .address = Common::Size_Invalid, .size = Common::Size_6_GB                         , .type = KAddressSpaceInfo::Type::Alias,    },
+   { .bit_width = 39, .address = Common::Size_128_MB , .size = Common::Size_512_GB - Common::Size_128_MB , .type = KAddressSpaceInfo::Type::Map39Bit, },
+   { .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_64_GB                        , .type = KAddressSpaceInfo::Type::MapSmall  },
+   { .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_6_GB                         , .type = KAddressSpaceInfo::Type::Heap,     },
+   { .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_64_GB                        , .type = KAddressSpaceInfo::Type::Alias,    },
+   { .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_2_GB                         , .type = KAddressSpaceInfo::Type::Stack,    },
 }};
 // clang-format on

 constexpr bool IsAllowedIndexForAddress(std::size_t index) {
-    return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Invalid;
+    return index < AddressSpaceInfos.size() &&
+           AddressSpaceInfos[index].address != Common::Size_Invalid;
 }

 using IndexArray =
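The Common::Size_* names in the new table come from the common/common_sizes.h header this commit introduces; the hunk only shows the include. A self-contained sketch of what that header is assumed to provide, reusing the values from the enum deleted above:

    // Assumed shape of common/common_sizes.h (values copied from the removed enum).
    #include <limits>
    #include "common/common_types.h"

    namespace Common {
    enum : u64 {
        Size_1_MB = 0x100000,
        Size_2_MB = 2 * Size_1_MB,
        Size_1_GB = 0x40000000,
        Size_Invalid = std::numeric_limits<u64>::max(),
    };
    } // namespace Common

    // Sanity check: the shared constants must agree with the old local values.
    static_assert(Common::Size_2_MB == 2 * 1024 * 1024);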
@@ -1,23 +1,69 @@
-// Copyright 2020 yuzu Emulator Project
+// Copyright 2021 yuzu Emulator Project
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.

 #pragma once

+#include <utility>
+
+#include "common/alignment.h"
+#include "common/common_sizes.h"
 #include "common/common_types.h"
 #include "core/device_memory.h"
+#include "core/hle/kernel/k_memory_region.h"
+#include "core/hle/kernel/k_memory_region_type.h"
+#include "core/hle/kernel/memory_types.h"

 namespace Kernel {

-constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024;
+constexpr std::size_t L1BlockSize = Common::Size_1_GB;
+constexpr std::size_t L2BlockSize = Common::Size_2_MB;
+
+constexpr std::size_t GetMaximumOverheadSize(std::size_t size) {
+    return (Common::DivideUp(size, L1BlockSize) + Common::DivideUp(size, L2BlockSize)) * PageSize;
+}
+
+constexpr std::size_t MainMemorySize = Common::Size_4_GB;
+constexpr std::size_t MainMemorySizeMax = Common::Size_8_GB;
+
+constexpr std::size_t ReservedEarlyDramSize = 0x60000;
+constexpr std::size_t DramPhysicalAddress = 0x80000000;
+
+constexpr std::size_t KernelAslrAlignment = Common::Size_2_MB;
 constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39;
+constexpr std::size_t KernelPhysicalAddressSpaceWidth = 1ULL << 48;
+
 constexpr std::size_t KernelVirtualAddressSpaceBase = 0ULL - KernelVirtualAddressSpaceWidth;
 constexpr std::size_t KernelVirtualAddressSpaceEnd =
     KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);
-constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1;
+constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1ULL;
 constexpr std::size_t KernelVirtualAddressSpaceSize =
     KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase;
+constexpr std::size_t KernelVirtualAddressCodeBase = KernelVirtualAddressSpaceBase;
+constexpr std::size_t KernelVirtualAddressCodeSize = 0x62000;
+constexpr std::size_t KernelVirtualAddressCodeEnd =
+    KernelVirtualAddressCodeBase + KernelVirtualAddressCodeSize;
+
+constexpr std::size_t KernelPhysicalAddressSpaceBase = 0ULL;
+constexpr std::size_t KernelPhysicalAddressSpaceEnd =
+    KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceWidth;
+constexpr std::size_t KernelPhysicalAddressSpaceLast = KernelPhysicalAddressSpaceEnd - 1ULL;
+constexpr std::size_t KernelPhysicalAddressSpaceSize =
+    KernelPhysicalAddressSpaceEnd - KernelPhysicalAddressSpaceBase;
+constexpr std::size_t KernelPhysicalAddressCodeBase = DramPhysicalAddress + ReservedEarlyDramSize;
+
+constexpr std::size_t KernelPageTableHeapSize = GetMaximumOverheadSize(MainMemorySizeMax);
+constexpr std::size_t KernelInitialPageHeapSize = Common::Size_128_KB;
+
+constexpr std::size_t KernelSlabHeapDataSize = Common::Size_5_MB;
+constexpr std::size_t KernelSlabHeapGapsSize = Common::Size_2_MB - Common::Size_64_KB;
+constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSize;
+
+// NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860.
+constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000ULL;
+
+constexpr std::size_t KernelResourceSize =
+    KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;

 constexpr bool IsKernelAddressKey(VAddr key) {
     return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
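GetMaximumOverheadSize bounds the page-table bookkeeping for mapping a region: one page per potential 1 GiB L1 block plus one page per potential 2 MiB L2 block. A worked example for KernelPageTableHeapSize, assuming the usual PageSize of 0x1000 from memory_types.h:

    // DivideUp(8 GiB, 1 GiB) = 8    L1 entries
    // DivideUp(8 GiB, 2 MiB) = 4096 L2 entries
    // (8 + 4096) * 0x1000    = 0x1008000 bytes, i.e. just over 16 MiB
    static_assert(GetMaximumOverheadSize(Common::Size_8_GB) == 0x1008000);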
@@ -27,64 +73,327 @@ constexpr bool IsKernelAddress(VAddr address) {
     return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
 }

-class KMemoryRegion final {
-    friend class KMemoryLayout;
-
-public:
-    constexpr PAddr StartAddress() const {
-        return start_address;
-    }
-
-    constexpr PAddr EndAddress() const {
-        return end_address;
-    }
-
-private:
-    constexpr KMemoryRegion() = default;
-    constexpr KMemoryRegion(PAddr start_address, PAddr end_address)
-        : start_address{start_address}, end_address{end_address} {}
-
-    const PAddr start_address{};
-    const PAddr end_address{};
-};
-
 class KMemoryLayout final {
 public:
-    constexpr const KMemoryRegion& Application() const {
-        return application;
+    KMemoryLayout();
+
+    KMemoryRegionTree& GetVirtualMemoryRegionTree() {
+        return virtual_tree;
     }
+    const KMemoryRegionTree& GetVirtualMemoryRegionTree() const {
+        return virtual_tree;
+    }
+    KMemoryRegionTree& GetPhysicalMemoryRegionTree() {
+        return physical_tree;
+    }
+    const KMemoryRegionTree& GetPhysicalMemoryRegionTree() const {
+        return physical_tree;
+    }
+    KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() {
+        return virtual_linear_tree;
+    }
+    const KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() const {
+        return virtual_linear_tree;
+    }
+    KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() {
+        return physical_linear_tree;
+    }
+    const KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() const {
+        return physical_linear_tree;
+    }

-    constexpr const KMemoryRegion& Applet() const {
-        return applet;
+    VAddr GetLinearVirtualAddress(PAddr address) const {
+        return address + linear_phys_to_virt_diff;
     }
+    PAddr GetLinearPhysicalAddress(VAddr address) const {
+        return address + linear_virt_to_phys_diff;
+    }

-    constexpr const KMemoryRegion& System() const {
-        return system;
+    const KMemoryRegion* FindVirtual(VAddr address) const {
+        return Find(address, GetVirtualMemoryRegionTree());
     }
+    const KMemoryRegion* FindPhysical(PAddr address) const {
+        return Find(address, GetPhysicalMemoryRegionTree());
+    }

-    static constexpr KMemoryLayout GetDefaultLayout() {
-        constexpr std::size_t application_size{0xcd500000};
-        constexpr std::size_t applet_size{0x1fb00000};
-        constexpr PAddr application_start_address{Core::DramMemoryMap::End - application_size};
-        constexpr PAddr application_end_address{Core::DramMemoryMap::End};
-        constexpr PAddr applet_start_address{application_start_address - applet_size};
-        constexpr PAddr applet_end_address{applet_start_address + applet_size};
-        constexpr PAddr system_start_address{Core::DramMemoryMap::SlabHeapEnd};
-        constexpr PAddr system_end_address{applet_start_address};
-        return {application_start_address, application_end_address, applet_start_address,
-                applet_end_address, system_start_address, system_end_address};
+    const KMemoryRegion* FindVirtualLinear(VAddr address) const {
+        return Find(address, GetVirtualLinearMemoryRegionTree());
     }
+    const KMemoryRegion* FindPhysicalLinear(PAddr address) const {
+        return Find(address, GetPhysicalLinearMemoryRegionTree());
+    }

+    VAddr GetMainStackTopAddress(s32 core_id) const {
+        return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscMainStack);
+    }
+    VAddr GetIdleStackTopAddress(s32 core_id) const {
+        return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscIdleStack);
+    }
+    VAddr GetExceptionStackTopAddress(s32 core_id) const {
+        return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
+    }
+
+    VAddr GetSlabRegionAddress() const {
+        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab))
+            .GetAddress();
+    }
+
+    const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
+        return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
+    }
+    PAddr GetDevicePhysicalAddress(KMemoryRegionType type) const {
+        return GetDeviceRegion(type).GetAddress();
+    }
+    VAddr GetDeviceVirtualAddress(KMemoryRegionType type) const {
+        return GetDeviceRegion(type).GetPairAddress();
+    }
+
+    const KMemoryRegion& GetPoolManagementRegion() const {
+        return Dereference(
+            GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramPoolManagement));
+    }
+    const KMemoryRegion& GetPageTableHeapRegion() const {
+        return Dereference(
+            GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelPtHeap));
+    }
+    const KMemoryRegion& GetKernelStackRegion() const {
+        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelStack));
+    }
+    const KMemoryRegion& GetTempRegion() const {
+        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelTemp));
+    }
+
+    const KMemoryRegion& GetKernelTraceBufferRegion() const {
+        return Dereference(GetVirtualLinearMemoryRegionTree().FindByType(
+            KMemoryRegionType_VirtualDramKernelTraceBuffer));
+    }
+
+    const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const {
+        return Dereference(FindVirtualLinear(address));
+    }
+
+    const KMemoryRegion* GetPhysicalKernelTraceBufferRegion() const {
+        return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_KernelTraceBuffer);
+    }
+    const KMemoryRegion* GetPhysicalOnMemoryBootImageRegion() const {
+        return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_OnMemoryBootImage);
+    }
+    const KMemoryRegion* GetPhysicalDTBRegion() const {
+        return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DTB);
+    }
+
+    bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address) const {
+        return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(),
+                              KMemoryRegionType_DramUserPool);
+    }
+    bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address) const {
+        return IsTypedAddress(region, address, GetVirtualLinearMemoryRegionTree(),
+                              KMemoryRegionType_VirtualDramUserPool);
+    }
+
+    bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address, size_t size) const {
+        return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(),
+                              KMemoryRegionType_DramUserPool);
+    }
+    bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address, size_t size) const {
+        return IsTypedAddress(region, address, size, GetVirtualLinearMemoryRegionTree(),
+                              KMemoryRegionType_VirtualDramUserPool);
+    }
+
+    bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address) const {
+        return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(),
+                              static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped));
+    }
+    bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address,
+                                       size_t size) const {
+        return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(),
+                              static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped));
+    }
+
+    std::pair<size_t, size_t> GetTotalAndKernelMemorySizes() const {
+        size_t total_size = 0, kernel_size = 0;
+        for (const auto& region : GetPhysicalMemoryRegionTree()) {
+            if (region.IsDerivedFrom(KMemoryRegionType_Dram)) {
+                total_size += region.GetSize();
+                if (!region.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
+                    kernel_size += region.GetSize();
+                }
+            }
+        }
+        return std::make_pair(total_size, kernel_size);
+    }
+
+    void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
+                                           VAddr linear_virtual_start);
+    static size_t GetResourceRegionSizeForInit();
+
+    auto GetKernelRegionExtents() const {
+        return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel);
+    }
+    auto GetKernelCodeRegionExtents() const {
+        return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelCode);
+    }
+    auto GetKernelStackRegionExtents() const {
+        return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelStack);
+    }
+    auto GetKernelMiscRegionExtents() const {
+        return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelMisc);
+    }
+    auto GetKernelSlabRegionExtents() const {
+        return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelSlab);
+    }
+
+    auto GetLinearRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionAttr_LinearMapped);
+    }
+
+    auto GetLinearRegionVirtualExtents() const {
+        const auto physical = GetLinearRegionPhysicalExtents();
+        return KMemoryRegion(GetLinearVirtualAddress(physical.GetAddress()),
+                             GetLinearVirtualAddress(physical.GetLastAddress()), 0,
+                             KMemoryRegionType_None);
+    }
+
+    auto GetMainMemoryPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Dram);
+    }
+    auto GetCarveoutRegionExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionAttr_CarveoutProtected);
+    }
+
+    auto GetKernelRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramKernelBase);
+    }
+    auto GetKernelCodeRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramKernelCode);
+    }
+    auto GetKernelSlabRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramKernelSlab);
+    }
+    auto GetKernelPageTableHeapRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramKernelPtHeap);
+    }
+    auto GetKernelInitPageTableRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramKernelInitPt);
+    }
+
+    auto GetKernelPoolManagementRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramPoolManagement);
+    }
+    auto GetKernelPoolPartitionRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramPoolPartition);
+    }
+    auto GetKernelSystemPoolRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramSystemPool);
+    }
+    auto GetKernelSystemNonSecurePoolRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramSystemNonSecurePool);
+    }
+    auto GetKernelAppletPoolRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramAppletPool);
+    }
+    auto GetKernelApplicationPoolRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramApplicationPool);
+    }
+
+    auto GetKernelTraceBufferRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_KernelTraceBuffer);
+    }

 private:
-    constexpr KMemoryLayout(PAddr application_start_address, std::size_t application_size,
-                            PAddr applet_start_address, std::size_t applet_size,
-                            PAddr system_start_address, std::size_t system_size)
-        : application{application_start_address, application_size},
-          applet{applet_start_address, applet_size}, system{system_start_address, system_size} {}
+    template <typename AddressType>
+    static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address,
+                               const KMemoryRegionTree& tree, KMemoryRegionType type) {
+        // Check if the cached region already contains the address.
+        if (region != nullptr && region->Contains(address)) {
+            return true;
+        }

-    const KMemoryRegion application;
-    const KMemoryRegion applet;
-    const KMemoryRegion system;
+        // Find the containing region, and update the cache.
+        if (const KMemoryRegion* found = tree.Find(address);
+            found != nullptr && found->IsDerivedFrom(type)) {
+            region = found;
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    template <typename AddressType>
+    static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address, size_t size,
+                               const KMemoryRegionTree& tree, KMemoryRegionType type) {
+        // Get the end of the checked region.
+        const u64 last_address = address + size - 1;
+
+        // Walk the tree to verify the region is correct.
+        const KMemoryRegion* cur =
+            (region != nullptr && region->Contains(address)) ? region : tree.Find(address);
+        while (cur != nullptr && cur->IsDerivedFrom(type)) {
+            if (last_address <= cur->GetLastAddress()) {
+                region = cur;
+                return true;
+            }
+
+            cur = cur->GetNext();
+        }
+        return false;
+    }
+
+    template <typename AddressType>
+    static const KMemoryRegion* Find(AddressType address, const KMemoryRegionTree& tree) {
+        return tree.Find(address);
+    }
+
+    static KMemoryRegion& Dereference(KMemoryRegion* region) {
+        ASSERT(region != nullptr);
+        return *region;
+    }
+
+    static const KMemoryRegion& Dereference(const KMemoryRegion* region) {
+        ASSERT(region != nullptr);
+        return *region;
+    }
+
+    VAddr GetStackTopAddress(s32 core_id, KMemoryRegionType type) const {
+        const auto& region = Dereference(
+            GetVirtualMemoryRegionTree().FindByTypeAndAttribute(type, static_cast<u32>(core_id)));
+        ASSERT(region.GetEndAddress() != 0);
+        return region.GetEndAddress();
+    }
+
+private:
+    u64 linear_phys_to_virt_diff{};
+    u64 linear_virt_to_phys_diff{};
+    KMemoryRegionAllocator memory_region_allocator;
+    KMemoryRegionTree virtual_tree;
+    KMemoryRegionTree physical_tree;
+    KMemoryRegionTree virtual_linear_tree;
+    KMemoryRegionTree physical_linear_tree;
 };

+namespace Init {
+
+// These should be generic, regardless of board.
+void SetupPoolPartitionMemoryRegions(KMemoryLayout& memory_layout);
+
+// These may be implemented in a board-specific manner.
+void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout);
+void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout);
+
+} // namespace Init
+
 } // namespace Kernel
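Note that the IsTypedAddress overloads take the caller's region pointer by reference and use it as a one-entry cache: repeated queries landing in the same region skip the tree walk entirely. A sketch of the intended call-site pattern; the loop and variable names are illustrative only:

    // Caller-side caching pattern enabled by IsHeapPhysicalAddress.
    const Kernel::KMemoryRegion* hint = nullptr;
    for (const PAddr addr : addresses_to_check) {
        // The first hit walks the physical-linear tree; later hits within the
        // same region are answered by the cached hint's Contains() check alone.
        if (memory_layout.IsHeapPhysicalAddress(hint, addr)) {
            // addr is backed by DRAM user-pool (heap) memory.
        }
    }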
@@ -173,4 +173,16 @@ ResultCode KMemoryManager::Free(KPageLinkedList& page_list, std::size_t num_page
     return RESULT_SUCCESS;
 }

+std::size_t KMemoryManager::Impl::CalculateManagementOverheadSize(std::size_t region_size) {
+    const std::size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
+    const std::size_t optimize_map_size =
+        (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
+         Common::BitSize<u64>()) *
+        sizeof(u64);
+    const std::size_t manager_meta_size =
+        Common::AlignUp(optimize_map_size + ref_count_size, PageSize);
+    const std::size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size);
+    return manager_meta_size + page_heap_size;
+}
+
 } // namespace Kernel
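Plugging a 4 GiB region into the new function makes the formula concrete (PageSize assumed to be 0x1000):

    // pages             = 0x1'0000'0000 / 0x1000               = 1,048,576
    // ref_count_size    = 1,048,576 * sizeof(u16)              = 2,097,152 bytes
    // optimize_map_size = (AlignUp(1,048,576, 64) / 64) * 8    =   131,072 bytes
    // manager_meta_size = AlignUp(2,097,152 + 131,072, 0x1000) = 2,228,224 bytes
    // ...plus whatever KPageHeap::CalculateManagementOverheadSize reports for
    // its block bitmaps, so a little over 2 MiB of metadata for this pool.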
@@ -29,6 +29,10 @@ public:
         Shift = 4,
         Mask = (0xF << Shift),

+        // Aliases.
+        Unsafe = Application,
+        Secure = System,
     };

     enum class Direction : u32 {
@@ -56,6 +60,10 @@ public:
     static constexpr std::size_t MaxManagerCount = 10;

 public:
+    static std::size_t CalculateManagementOverheadSize(std::size_t region_size) {
+        return Impl::CalculateManagementOverheadSize(region_size);
+    }
+
     static constexpr u32 EncodeOption(Pool pool, Direction dir) {
         return (static_cast<u32>(pool) << static_cast<u32>(Pool::Shift)) |
                (static_cast<u32>(dir) << static_cast<u32>(Direction::Shift));
@@ -85,6 +93,16 @@ private:
         KPageHeap heap;
         Pool pool{};

+    public:
+        static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
+
+        static constexpr std::size_t CalculateOptimizedProcessOverheadSize(
+            std::size_t region_size) {
+            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
+                    Common::BitSize<u64>()) *
+                   sizeof(u64);
+        }
+
     public:
         Impl() = default;

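EncodeOption packs the pool selector and allocation direction into a single u32, with the pool occupying bits [4:7] per Pool::Shift/Pool::Mask above. A sketch of the reverse operation; this decode helper is hypothetical, only EncodeOption and the Shift/Mask enumerators come from the diff:

    // Hypothetical helper recovering the pool from a packed option value.
    constexpr KMemoryManager::Pool DecodePool(u32 option) {
        return static_cast<KMemoryManager::Pool>(
            (option & static_cast<u32>(KMemoryManager::Pool::Mask)) >>
            static_cast<u32>(KMemoryManager::Pool::Shift));
    }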
@@ -62,7 +62,7 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
 }

 u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
-    std::scoped_lock lock{guard};
+    KScopedSpinLock lk{guard};
     if (KThread* prev_highest_thread = state.highest_priority_thread;
         prev_highest_thread != highest_thread) {
         if (prev_highest_thread != nullptr) {
@@ -637,11 +637,11 @@ void KScheduler::RescheduleCurrentCore() {
     if (phys_core.IsInterrupted()) {
         phys_core.ClearInterrupt();
     }
-    guard.lock();
+    guard.Lock();
     if (state.needs_scheduling.load()) {
         Schedule();
     } else {
-        guard.unlock();
+        guard.Unlock();
     }
 }

@@ -669,7 +669,7 @@ void KScheduler::Unload(KThread* thread) {
         } else {
             prev_thread = nullptr;
         }
-        thread->context_guard.unlock();
+        thread->context_guard.Unlock();
     }
 }

@@ -713,7 +713,7 @@ void KScheduler::ScheduleImpl() {

     // If we're not actually switching thread, there's nothing to do.
     if (next_thread == current_thread.load()) {
-        guard.unlock();
+        guard.Unlock();
         return;
     }

@@ -732,7 +732,7 @@ void KScheduler::ScheduleImpl() {
     } else {
         old_context = &idle_thread->GetHostContext();
     }
-    guard.unlock();
+    guard.Unlock();

     Common::Fiber::YieldTo(*old_context, *switch_fiber);
     /// When a thread wakes up, the scheduler may have changed to other in another core.
@@ -748,24 +748,24 @@ void KScheduler::OnSwitch(void* this_scheduler) {
 void KScheduler::SwitchToCurrent() {
     while (true) {
         {
-            std::scoped_lock lock{guard};
+            KScopedSpinLock lk{guard};
             current_thread.store(state.highest_priority_thread);
             state.needs_scheduling.store(false);
         }
         const auto is_switch_pending = [this] {
-            std::scoped_lock lock{guard};
+            KScopedSpinLock lk{guard};
             return state.needs_scheduling.load();
         };
         do {
             auto next_thread = current_thread.load();
             if (next_thread != nullptr) {
-                next_thread->context_guard.lock();
+                next_thread->context_guard.Lock();
                 if (next_thread->GetRawState() != ThreadState::Runnable) {
-                    next_thread->context_guard.unlock();
+                    next_thread->context_guard.Unlock();
                     break;
                 }
                 if (next_thread->GetActiveCore() != core_id) {
-                    next_thread->context_guard.unlock();
+                    next_thread->context_guard.Unlock();
                     break;
                 }
             }
@@ -2,19 +2,16 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.

-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
-
 #pragma once

 #include <atomic>

 #include "common/common_types.h"
-#include "common/spin_lock.h"
 #include "core/hle/kernel/global_scheduler_context.h"
 #include "core/hle/kernel/k_priority_queue.h"
 #include "core/hle/kernel/k_scheduler_lock.h"
 #include "core/hle/kernel/k_scoped_lock.h"
+#include "core/hle/kernel/k_spin_lock.h"

 namespace Common {
 class Fiber;
@@ -195,7 +192,7 @@ private:
     u64 last_context_switch_time{};
     const s32 core_id;

-    Common::SpinLock guard{};
+    KSpinLock guard{};
 };

 class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
@@ -2,14 +2,11 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.

-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
-
 #pragma once

 #include "common/assert.h"
-#include "common/spin_lock.h"
 #include "core/hardware_properties.h"
+#include "core/hle/kernel/k_spin_lock.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"

@@ -34,7 +31,7 @@ public:
         } else {
             // Otherwise, we want to disable scheduling and acquire the spinlock.
             SchedulerType::DisableScheduling(kernel);
-            spin_lock.lock();
+            spin_lock.Lock();

             // For debug, ensure that our state is valid.
             ASSERT(lock_count == 0);
@@ -58,7 +55,7 @@ public:

             // Note that we no longer hold the lock, and unlock the spinlock.
             owner_thread = nullptr;
-            spin_lock.unlock();
+            spin_lock.Unlock();

             // Enable scheduling, and perform a rescheduling operation.
             SchedulerType::EnableScheduling(kernel, cores_needing_scheduling);
@@ -67,7 +64,7 @@ public:

 private:
     KernelCore& kernel;
-    Common::SpinLock spin_lock{};
+    KAlignedSpinLock spin_lock{};
     s32 lock_count{};
     KThread* owner_thread{};
 };
@@ -28,6 +28,12 @@ private:
     std::atomic_flag lck = ATOMIC_FLAG_INIT;
 };

+// TODO(bunnei): Alias for now, in case we want to implement these accurately in the future.
+using KAlignedSpinLock = KSpinLock;
+using KNotAlignedSpinLock = KSpinLock;
+
 using KScopedSpinLock = KScopedLock<KSpinLock>;
+using KScopedAlignedSpinLock = KScopedLock<KAlignedSpinLock>;
+using KScopedNotAlignedSpinLock = KScopedLock<KNotAlignedSpinLock>;

 } // namespace Kernel
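These aliases work because KScopedLock only requires its lock type to expose Lock()/Unlock(), which is also why the scheduler hunks above can swap std::scoped_lock for KScopedSpinLock one-for-one. The assumed shape of that wrapper, written out as a self-contained sketch (the real definition lives in k_scoped_lock.h):

    // RAII sketch: Lock() on construction, Unlock() on scope exit.
    template <typename T>
    class ScopedLockSketch {
    public:
        explicit ScopedLockSketch(T& lock) : lock_{lock} {
            lock_.Lock();
        }
        ~ScopedLockSketch() {
            lock_.Unlock();
        }
        ScopedLockSketch(const ScopedLockSketch&) = delete;
        ScopedLockSketch& operator=(const ScopedLockSketch&) = delete;

    private:
        T& lock_;
    };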
@@ -6,14 +6,18 @@

-#include "common/common_types.h"
+#define BOARD_NINTENDO_NX

+#ifdef BOARD_NINTENDO_NX
+
+#include "core/hle/kernel/board/nintendo/nx/k_system_control.h"
+
 namespace Kernel {

-class KSystemControl {
-public:
-    KSystemControl() = default;
-
-    static u64 GenerateRandomRange(u64 min, u64 max);
-    static u64 GenerateRandomU64();
-};
+using Kernel::Board::Nintendo::Nx::KSystemControl;

 } // namespace Kernel
+
+#else
+#error "Unknown board for KSystemControl"
+#endif
@@ -14,10 +14,10 @@

 #include "common/common_types.h"
 #include "common/intrusive_red_black_tree.h"
-#include "common/spin_lock.h"
 #include "core/arm/arm_interface.h"
 #include "core/hle/kernel/k_affinity_mask.h"
 #include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_spin_lock.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/svc_common.h"
@@ -732,7 +732,7 @@ private:
     s8 priority_inheritance_count{};
     bool resource_limit_release_hint{};
     StackParameters stack_parameters{};
-    Common::SpinLock context_guard{};
+    KSpinLock context_guard{};

     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};
@@ -1,4 +1,4 @@
-// Copyright 2014 Citra Emulator Project
+// Copyright 2021 yuzu Emulator Project
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.

@@ -12,6 +12,7 @@
 #include <utility>

 #include "common/assert.h"
+#include "common/common_sizes.h"
 #include "common/logging/log.h"
 #include "common/microprofile.h"
 #include "common/thread.h"
@@ -268,45 +269,314 @@ struct KernelCore::Impl {
         return schedulers[thread_id]->GetCurrentThread();
     }

+    void DeriveInitialMemoryLayout(KMemoryLayout& memory_layout) {
+        // Insert the root region for the virtual memory tree, from which all other regions will
+        // derive.
+        memory_layout.GetVirtualMemoryRegionTree().InsertDirectly(
+            KernelVirtualAddressSpaceBase,
+            KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1);
+
+        // Insert the root region for the physical memory tree, from which all other regions will
+        // derive.
+        memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly(
+            KernelPhysicalAddressSpaceBase,
+            KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);
+
+        // Save start and end for ease of use.
+        const VAddr code_start_virt_addr = KernelVirtualAddressCodeBase;
+        const VAddr code_end_virt_addr = KernelVirtualAddressCodeEnd;
+
+        // Setup the containing kernel region.
+        constexpr size_t KernelRegionSize = Common::Size_1_GB;
+        constexpr size_t KernelRegionAlign = Common::Size_1_GB;
+        constexpr VAddr kernel_region_start =
+            Common::AlignDown(code_start_virt_addr, KernelRegionAlign);
+        size_t kernel_region_size = KernelRegionSize;
+        if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
+            kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start;
+        }
+        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel));
+
+        // Setup the code region.
+        constexpr size_t CodeRegionAlign = PageSize;
+        constexpr VAddr code_region_start =
+            Common::AlignDown(code_start_virt_addr, CodeRegionAlign);
+        constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign);
+        constexpr size_t code_region_size = code_region_end - code_region_start;
+        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            code_region_start, code_region_size, KMemoryRegionType_KernelCode));
+
+        // Setup board-specific device physical regions.
+        Init::SetupDevicePhysicalMemoryRegions(memory_layout);
+
+        // Determine the amount of space needed for the misc region.
+        size_t misc_region_needed_size;
+        {
+            // Each core has a one page stack for all three stack types (Main, Idle, Exception).
+            misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize));
+
+            // Account for each auto-map device.
+            for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+                if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
+                    // Check that the region is valid.
+                    ASSERT(region.GetEndAddress() != 0);
+
+                    // Account for the region.
+                    misc_region_needed_size +=
+                        PageSize + (Common::AlignUp(region.GetLastAddress(), PageSize) -
+                                    Common::AlignDown(region.GetAddress(), PageSize));
+                }
+            }
+
+            // Multiply the needed size by three, to account for the need for guard space.
+            misc_region_needed_size *= 3;
+        }
+
+        // Decide on the actual size for the misc region.
+        constexpr size_t MiscRegionAlign = KernelAslrAlignment;
+        constexpr size_t MiscRegionMinimumSize = Common::Size_32_MB;
+        const size_t misc_region_size = Common::AlignUp(
+            std::max(misc_region_needed_size, MiscRegionMinimumSize), MiscRegionAlign);
+        ASSERT(misc_region_size > 0);
+
+        // Setup the misc region.
+        const VAddr misc_region_start =
+            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+                misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
+        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));
+
+        // Setup the stack region.
+        constexpr size_t StackRegionSize = Common::Size_14_MB;
+        constexpr size_t StackRegionAlign = KernelAslrAlignment;
+        const VAddr stack_region_start =
+            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+                StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
+        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));
+
+        // Determine the size of the resource region.
+        const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit();
+
+        // Determine the size of the slab region.
+        const size_t slab_region_size = Common::AlignUp(KernelSlabHeapSize, PageSize);
+        ASSERT(slab_region_size <= resource_region_size);
+
+        // Setup the slab region.
+        const PAddr code_start_phys_addr = KernelPhysicalAddressCodeBase;
+        const PAddr code_end_phys_addr = code_start_phys_addr + code_region_size;
+        const PAddr slab_start_phys_addr = code_end_phys_addr;
+        const PAddr slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
+        constexpr size_t SlabRegionAlign = KernelAslrAlignment;
+        const size_t slab_region_needed_size =
+            Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) -
+            Common::AlignDown(code_end_phys_addr, SlabRegionAlign);
+        const VAddr slab_region_start =
+            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+                slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) +
+            (code_end_phys_addr % SlabRegionAlign);
+        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab));
+
+        // Setup the temp region.
+        constexpr size_t TempRegionSize = Common::Size_128_MB;
+        constexpr size_t TempRegionAlign = KernelAslrAlignment;
+        const VAddr temp_region_start =
+            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+                TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
+        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
+                                                                 KMemoryRegionType_KernelTemp));
+
+        // Automatically map in devices that have auto-map attributes.
+        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+            // We only care about kernel regions.
+            if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) {
+                continue;
+            }
+
+            // Check whether we should map the region.
+            if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
+                continue;
+            }
+
+            // If this region has already been mapped, no need to consider it.
+            if (region.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) {
+                continue;
+            }
+
+            // Check that the region is valid.
+            ASSERT(region.GetEndAddress() != 0);
+
+            // Set the attribute to note we've mapped this region.
+            region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);
+
+            // Create a virtual pair region and insert it into the tree.
+            const PAddr map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize);
+            const size_t map_size =
+                Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr;
+            const VAddr map_virt_addr =
+                memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
+                    map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
+            ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+                map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice));
+            region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr);
+        }
+
+        Init::SetupDramPhysicalMemoryRegions(memory_layout);
+
+        // Insert a physical region for the kernel code region.
+        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+            code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode));
+
+        // Insert a physical region for the kernel slab region.
+        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+            slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab));
+
+        // Determine size available for kernel page table heaps, requiring > 8 MB.
+        const PAddr resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
+        const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
+        ASSERT(page_table_heap_size / Common::Size_4_MB > 2);
+
+        // Insert a physical region for the kernel page table heap region
+        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+            slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));
+
+        // All DRAM regions that we haven't tagged by this point will be mapped under the linear
+        // mapping. Tag them.
+        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+            if (region.GetType() == KMemoryRegionType_Dram) {
+                // Check that the region is valid.
+                ASSERT(region.GetEndAddress() != 0);
+
+                // Set the linear map attribute.
+                region.SetTypeAttribute(KMemoryRegionAttr_LinearMapped);
+            }
+        }
+
+        // Get the linear region extents.
+        const auto linear_extents =
+            memory_layout.GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+                KMemoryRegionAttr_LinearMapped);
+        ASSERT(linear_extents.GetEndAddress() != 0);
+
+        // Setup the linear mapping region.
+        constexpr size_t LinearRegionAlign = Common::Size_1_GB;
+        const PAddr aligned_linear_phys_start =
+            Common::AlignDown(linear_extents.GetAddress(), LinearRegionAlign);
+        const size_t linear_region_size =
+            Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) -
+            aligned_linear_phys_start;
+        const VAddr linear_region_start =
+            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
+                linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);
+
+        const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start;
+
+        // Map and create regions for all the linearly-mapped data.
+        {
+            PAddr cur_phys_addr = 0;
+            u64 cur_size = 0;
+            for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+                if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
+                    continue;
+                }
+
+                ASSERT(region.GetEndAddress() != 0);
+
+                if (cur_size == 0) {
+                    cur_phys_addr = region.GetAddress();
+                    cur_size = region.GetSize();
+                } else if (cur_phys_addr + cur_size == region.GetAddress()) {
+                    cur_size += region.GetSize();
+                } else {
+                    cur_phys_addr = region.GetAddress();
+                    cur_size = region.GetSize();
+                }
+
+                const VAddr region_virt_addr =
+                    region.GetAddress() + linear_region_phys_to_virt_diff;
+                ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+                    region_virt_addr, region.GetSize(),
+                    GetTypeForVirtualLinearMapping(region.GetType())));
+                region.SetPairAddress(region_virt_addr);
+
+                KMemoryRegion* virt_region =
+                    memory_layout.GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
+                ASSERT(virt_region != nullptr);
+                virt_region->SetPairAddress(region.GetAddress());
+            }
+        }
+
+        // Insert regions for the initial page table region.
+        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+            resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt));
+        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize,
+            KMemoryRegionType_VirtualDramKernelInitPt));
+
+        // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to
+        // some pool partition. Tag them.
+        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+            if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) {
+                region.SetType(KMemoryRegionType_DramPoolPartition);
+            }
+        }
+
+        // Setup all other memory regions needed to arrange the pool partitions.
+        Init::SetupPoolPartitionMemoryRegions(memory_layout);
+
+        // Cache all linear regions in their own trees for faster access, later.
+        memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start,
+                                                        linear_region_start);
+    }
+
     void InitializeMemoryLayout() {
-        // Initialize memory layout
-        constexpr KMemoryLayout layout{KMemoryLayout::GetDefaultLayout()};
+        // Derive the initial memory layout from the emulated board
+        KMemoryLayout memory_layout;
+        DeriveInitialMemoryLayout(memory_layout);

+        const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents();
+        const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents();
+        const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents();
+
+        // Initialize memory managers
+        memory_manager = std::make_unique<KMemoryManager>();
+        memory_manager->InitializeManager(KMemoryManager::Pool::Application,
+                                          application_pool.GetAddress(),
+                                          application_pool.GetEndAddress());
+        memory_manager->InitializeManager(KMemoryManager::Pool::Applet, applet_pool.GetAddress(),
+                                          applet_pool.GetEndAddress());
+        memory_manager->InitializeManager(KMemoryManager::Pool::System, system_pool.GetAddress(),
+                                          system_pool.GetEndAddress());

         // Setup memory regions for emulated processes
         // TODO(bunnei): These should not be hardcoded regions initialized within the kernel
         constexpr std::size_t hid_size{0x40000};
         constexpr std::size_t font_size{0x1100000};
         constexpr std::size_t irs_size{0x8000};
         constexpr std::size_t time_size{0x1000};
-        constexpr PAddr hid_addr{layout.System().StartAddress()};
-        constexpr PAddr font_pa{layout.System().StartAddress() + hid_size};
-        constexpr PAddr irs_addr{layout.System().StartAddress() + hid_size + font_size};
-        constexpr PAddr time_addr{layout.System().StartAddress() + hid_size + font_size + irs_size};
-
-        // Initialize memory manager
-        memory_manager = std::make_unique<KMemoryManager>();
-        memory_manager->InitializeManager(KMemoryManager::Pool::Application,
-                                          layout.Application().StartAddress(),
-                                          layout.Application().EndAddress());
-        memory_manager->InitializeManager(KMemoryManager::Pool::Applet,
-                                          layout.Applet().StartAddress(),
-                                          layout.Applet().EndAddress());
-        memory_manager->InitializeManager(KMemoryManager::Pool::System,
-                                          layout.System().StartAddress(),
-                                          layout.System().EndAddress());
+        const PAddr hid_phys_addr{system_pool.GetAddress()};
+        const PAddr font_phys_addr{system_pool.GetAddress() + hid_size};
+        const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size};
+        const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size};

         hid_shared_mem = Kernel::KSharedMemory::Create(
-            system.Kernel(), system.DeviceMemory(), nullptr, {hid_addr, hid_size / PageSize},
-            KMemoryPermission::None, KMemoryPermission::Read, hid_addr, hid_size,
+            system.Kernel(), system.DeviceMemory(), nullptr, {hid_phys_addr, hid_size / PageSize},
+            KMemoryPermission::None, KMemoryPermission::Read, hid_phys_addr, hid_size,
             "HID:SharedMemory");
         font_shared_mem = Kernel::KSharedMemory::Create(
-            system.Kernel(), system.DeviceMemory(), nullptr, {font_pa, font_size / PageSize},
-            KMemoryPermission::None, KMemoryPermission::Read, font_pa, font_size,
+            system.Kernel(), system.DeviceMemory(), nullptr, {font_phys_addr, font_size / PageSize},
+            KMemoryPermission::None, KMemoryPermission::Read, font_phys_addr, font_size,
             "Font:SharedMemory");
         irs_shared_mem = Kernel::KSharedMemory::Create(
-            system.Kernel(), system.DeviceMemory(), nullptr, {irs_addr, irs_size / PageSize},
-            KMemoryPermission::None, KMemoryPermission::Read, irs_addr, irs_size,
+            system.Kernel(), system.DeviceMemory(), nullptr, {irs_phys_addr, irs_size / PageSize},
+            KMemoryPermission::None, KMemoryPermission::Read, irs_phys_addr, irs_size,
             "IRS:SharedMemory");
         time_shared_mem = Kernel::KSharedMemory::Create(
-            system.Kernel(), system.DeviceMemory(), nullptr, {time_addr, time_size / PageSize},
-            KMemoryPermission::None, KMemoryPermission::Read, time_addr, time_size,
+            system.Kernel(), system.DeviceMemory(), nullptr, {time_phys_addr, time_size / PageSize},
+            KMemoryPermission::None, KMemoryPermission::Read, time_phys_addr, time_size,
             "Time:SharedMemory");

         // Allocate slab heaps
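The linear_region_phys_to_virt_diff computed here is the constant that KMemoryLayout::GetLinearVirtualAddress and GetLinearPhysicalAddress apply later. A short illustration with made-up numbers, since the real virtual base is randomized by ASLR:

    // Suppose DRAM starts at phys 0x8000'0000 (DramPhysicalAddress) and ASLR
    // placed the linear region at virt 0xffff'8080'0000'0000 (1 GiB aligned).
    // diff = 0xffff'8080'0000'0000 - 0x8000'0000 = 0xffff'807f'8000'0000
    // GetLinearVirtualAddress(0x8006'0000)  == 0x8006'0000 + diff
    //                                       == 0xffff'8080'0006'0000
    // GetLinearPhysicalAddress() applies the opposite diff, recovering
    // 0x8006'0000 from that virtual address.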
@@ -1,4 +1,4 @@
-// Copyright 2014 Citra Emulator Project / PPSSPP Project
+// Copyright 2021 yuzu Emulator Project
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
