early-access version 2519
@@ -176,6 +176,7 @@ void RestoreGlobalState(bool is_powered_on) {
     values.cpuopt_unsafe_ignore_standard_fpcr.SetGlobal(true);
     values.cpuopt_unsafe_inaccurate_nan.SetGlobal(true);
     values.cpuopt_unsafe_fastmem_check.SetGlobal(true);
+    values.cpuopt_unsafe_ignore_global_monitor.SetGlobal(true);
 
     // Renderer
     values.renderer_backend.SetGlobal(true);
@@ -484,12 +484,15 @@ struct Values {
     BasicSetting<bool> cpuopt_misc_ir{true, "cpuopt_misc_ir"};
     BasicSetting<bool> cpuopt_reduce_misalign_checks{true, "cpuopt_reduce_misalign_checks"};
     BasicSetting<bool> cpuopt_fastmem{true, "cpuopt_fastmem"};
+    BasicSetting<bool> cpuopt_fastmem_exclusives{true, "cpuopt_fastmem_exclusives"};
+    BasicSetting<bool> cpuopt_recompile_exclusives{true, "cpuopt_recompile_exclusives"};
 
     Setting<bool> cpuopt_unsafe_unfuse_fma{true, "cpuopt_unsafe_unfuse_fma"};
     Setting<bool> cpuopt_unsafe_reduce_fp_error{true, "cpuopt_unsafe_reduce_fp_error"};
     Setting<bool> cpuopt_unsafe_ignore_standard_fpcr{true, "cpuopt_unsafe_ignore_standard_fpcr"};
     Setting<bool> cpuopt_unsafe_inaccurate_nan{true, "cpuopt_unsafe_inaccurate_nan"};
     Setting<bool> cpuopt_unsafe_fastmem_check{true, "cpuopt_unsafe_fastmem_check"};
+    Setting<bool> cpuopt_unsafe_ignore_global_monitor{true, "cpuopt_unsafe_ignore_global_monitor"};
 
     // Renderer
     RangedSetting<RendererBackend> renderer_backend{
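The distinction above matters: BasicSetting holds a single global value, while Setting can additionally carry a per-game override, which is what RestoreGlobalState() resets via SetGlobal(true). A minimal sketch of that pattern, assuming the simplest possible shape (yuzu's real classes carry more state than this):

#include <string>
#include <utility>

template <typename Type>
class BasicSetting {
public:
    BasicSetting(Type default_val, std::string name)
        : value{default_val}, label{std::move(name)} {}

    const Type& GetValue() const { return value; }
    void SetValue(Type val) { value = val; }

protected:
    Type value;        // the one global value
    std::string label; // INI key, e.g. "cpuopt_fastmem_exclusives"
};

template <typename Type>
class Setting : public BasicSetting<Type> {
public:
    using BasicSetting<Type>::BasicSetting;

    // SetGlobal(true) is what RestoreGlobalState() calls: drop the override.
    void SetGlobal(bool is_global) { use_global = is_global; }

    const Type& GetValue() const { return use_global ? this->value : custom; }

private:
    bool use_global{true};
    Type custom{}; // per-game override, active while use_global is false
};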
@@ -137,6 +137,8 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
     config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
     config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
     config.only_detect_misalignment_via_page_table_on_page_boundary = true;
+    config.fastmem_exclusive_access = true;
+    config.recompile_on_exclusive_fastmem_failure = true;
 
     // Multi-process state
     config.processor_id = core_index;
@@ -178,6 +180,12 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
         if (!Settings::values.cpuopt_fastmem) {
             config.fastmem_pointer = nullptr;
         }
+        if (!Settings::values.cpuopt_fastmem_exclusives) {
+            config.fastmem_exclusive_access = false;
+        }
+        if (!Settings::values.cpuopt_recompile_exclusives) {
+            config.recompile_on_exclusive_fastmem_failure = false;
+        }
     }
 
     // Unsafe optimizations
@@ -195,6 +203,9 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
         if (Settings::values.cpuopt_unsafe_inaccurate_nan) {
            config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
         }
+        if (Settings::values.cpuopt_unsafe_ignore_global_monitor) {
+            config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor;
+        }
     }
 
     // Curated optimizations
@@ -203,6 +214,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
         config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_UnfuseFMA;
         config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreStandardFPCRValue;
         config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
+        config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor;
     }
 
     return std::make_unique<Dynarmic::A32::Jit>(config);
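config.optimizations accumulates flags with operator|=. For orientation, this is how such a type-safe enum-class flag set is typically defined; the flag values below are illustrative, not Dynarmic's actual bit assignments:

#include <cstdint>

enum class OptimizationFlag : std::uint32_t {
    Unsafe_UnfuseFMA = 1U << 0,
    Unsafe_InaccurateNaN = 1U << 1,
    Unsafe_IgnoreGlobalMonitor = 1U << 2,
};

constexpr OptimizationFlag operator|(OptimizationFlag lhs, OptimizationFlag rhs) {
    return static_cast<OptimizationFlag>(static_cast<std::uint32_t>(lhs) |
                                         static_cast<std::uint32_t>(rhs));
}

constexpr OptimizationFlag& operator|=(OptimizationFlag& lhs, OptimizationFlag rhs) {
    return lhs = lhs | rhs;
}

// Test whether a flag is present in the accumulated set.
constexpr bool Has(OptimizationFlag set, OptimizationFlag flag) {
    return (static_cast<std::uint32_t>(set) & static_cast<std::uint32_t>(flag)) != 0;
}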
@@ -185,6 +185,9 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
         config.fastmem_pointer = page_table->fastmem_arena;
         config.fastmem_address_space_bits = address_space_bits;
         config.silently_mirror_fastmem = false;
+
+        config.fastmem_exclusive_access = true;
+        config.recompile_on_exclusive_fastmem_failure = true;
     }
 
     // Multi-process state
@@ -237,6 +240,12 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
         if (!Settings::values.cpuopt_fastmem) {
             config.fastmem_pointer = nullptr;
         }
+        if (!Settings::values.cpuopt_fastmem_exclusives) {
+            config.fastmem_exclusive_access = false;
+        }
+        if (!Settings::values.cpuopt_recompile_exclusives) {
+            config.recompile_on_exclusive_fastmem_failure = false;
+        }
     }
 
     // Unsafe optimizations
@@ -254,6 +263,9 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
         if (Settings::values.cpuopt_unsafe_fastmem_check) {
            config.fastmem_address_space_bits = 64;
         }
+        if (Settings::values.cpuopt_unsafe_ignore_global_monitor) {
+            config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor;
+        }
     }
 
     // Curated optimizations
@@ -262,6 +274,7 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
         config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_UnfuseFMA;
         config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
         config.fastmem_address_space_bits = 64;
+        config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor;
     }
 
     return std::make_shared<Dynarmic::A64::Jit>(config);
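On the A64 side, cpuopt_unsafe_fastmem_check widens fastmem_address_space_bits to 64, which effectively removes the per-access bounds check. A sketch of the idea with hypothetical names (the real check is emitted inline by Dynarmic's JIT, not written as a helper like this):

#include <cstdint>

std::uint8_t* fastmem_arena = nullptr; // host mapping backing guest memory
std::uint64_t address_space_bits = 39; // typical guest virtual address width

bool FastmemRead8(std::uint64_t vaddr, std::uint8_t& out) {
    // With address_space_bits == 64 the range test can never fail, so the
    // branch is elided entirely -- that is the whole "unsafe" speedup, and
    // why a guest that computes a wild address can touch emulator memory.
    if (address_space_bits < 64 && (vaddr >> address_space_bits) != 0) {
        return false; // out of range: fall back to slow memory callbacks
    }
    out = fastmem_arena[vaddr];
    return true;
}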
@@ -37,8 +37,8 @@ u128 DynarmicExclusiveMonitor::ExclusiveRead128(std::size_t core_index, VAddr ad
     });
 }
 
-void DynarmicExclusiveMonitor::ClearExclusive() {
-    monitor.Clear();
+void DynarmicExclusiveMonitor::ClearExclusive(std::size_t core_index) {
+    monitor.ClearProcessor(core_index);
 }
 
 bool DynarmicExclusiveMonitor::ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) {
@@ -29,7 +29,7 @@ public:
     u32 ExclusiveRead32(std::size_t core_index, VAddr addr) override;
     u64 ExclusiveRead64(std::size_t core_index, VAddr addr) override;
     u128 ExclusiveRead128(std::size_t core_index, VAddr addr) override;
-    void ClearExclusive() override;
+    void ClearExclusive(std::size_t core_index) override;
 
     bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) override;
     bool ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) override;
@@ -23,7 +23,7 @@ public:
     virtual u32 ExclusiveRead32(std::size_t core_index, VAddr addr) = 0;
     virtual u64 ExclusiveRead64(std::size_t core_index, VAddr addr) = 0;
     virtual u128 ExclusiveRead128(std::size_t core_index, VAddr addr) = 0;
-    virtual void ClearExclusive() = 0;
+    virtual void ClearExclusive(std::size_t core_index) = 0;
 
     virtual bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) = 0;
     virtual bool ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) = 0;
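The signature change is the point of these hunks: the monitor tracks one reservation per core, so a caller should drop only its own core's reservation rather than wiping every core's. A hypothetical minimal model of that distinction (Dynarmic's real monitor also tracks the reserved address range and value):

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

class ExclusiveMonitorModel {
public:
    explicit ExclusiveMonitorModel(std::size_t core_count) : reservations(core_count) {}

    void Mark(std::size_t core_index, std::uint64_t vaddr) {
        reservations[core_index] = vaddr;
    }

    // Old interface: clears every core's reservation.
    void Clear() {
        for (auto& r : reservations) {
            r.reset();
        }
    }

    // New interface: clears only the calling core's reservation.
    void ClearProcessor(std::size_t core_index) {
        reservations[core_index].reset();
    }

private:
    std::vector<std::optional<std::uint64_t>> reservations;
};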
@@ -49,7 +49,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
         }
     } else {
         // Otherwise, clear our exclusive hold and finish
-        monitor.ClearExclusive();
+        monitor.ClearExclusive(current_core);
     }
 
     // We're done.
@@ -78,7 +78,7 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
         }
     } else {
         // Otherwise, clear our exclusive hold and finish.
-        monitor.ClearExclusive();
+        monitor.ClearExclusive(current_core);
     }
 
     // We're done.
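Both call sites follow the same read/test/conditional-store shape. Sketched here with std::atomic standing in for the emulated monitor (the real code uses ExclusiveRead32/ExclusiveWrite32 and now passes current_core when abandoning the reservation):

#include <atomic>

bool DecrementIfLessThan(std::atomic<int>& word, int& out, int value) {
    int current = word.load(std::memory_order_acquire); // ExclusiveRead32
    while (current < value) {
        // ExclusiveWrite32: succeeds only if no other core wrote in between;
        // on failure compare_exchange reloads `current` and we re-test.
        if (word.compare_exchange_weak(current, current - 1,
                                       std::memory_order_acq_rel)) {
            out = current;
            return true;
        }
    }
    // Predicate failed: drop our reservation (ClearExclusive(current_core)).
    out = current;
    return false;
}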
@@ -31,7 +31,7 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
     } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
         return KMemoryManager::Pool::SystemNonSecure;
     } else {
-        ASSERT("InvalidMemoryRegionType for conversion to Pool");
+        ASSERT_MSG("InvalidMemoryRegionType for conversion to Pool");
         return {};
     }
 }
@@ -102,9 +102,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
         Impl* manager = std::addressof(managers[num_managers++]);
         ASSERT(num_managers <= managers.size());
 
-        const size_t cur_size =
-            manager->Initialize(system, region_address, region_size, management_region,
-                                management_region_end, region_pool);
+        const size_t cur_size = manager->Initialize(region_address, region_size, management_region,
+                                                    management_region_end, region_pool);
         management_region += cur_size;
         ASSERT(management_region <= management_region_end);
 
@@ -384,9 +383,8 @@ void KMemoryManager::Open(const KPageLinkedList& pg) {
         }
     }
 
-size_t KMemoryManager::Impl::Initialize([[maybe_unused]] Core::System& system, PAddr address,
-                                        size_t size, VAddr management, VAddr management_end,
-                                        Pool p) {
+size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,
+                                        VAddr management_end, Pool p) {
     // Calculate management sizes.
     const size_t ref_count_size = (size / PageSize) * sizeof(u16);
     const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size);
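For a sense of scale of the management data Impl::Initialize() computes: the reference-count table costs one u16 per page. A small worked check, assuming the usual 4 KiB page size and an example region size:

#include <cstddef>
#include <cstdint>

constexpr std::uint64_t PageSize = 4096;

// One u16 reference count per page of the managed region.
constexpr std::uint64_t RefCountSize(std::uint64_t region_size) {
    return (region_size / PageSize) * sizeof(std::uint16_t);
}

// A 4 GiB DRAM region needs 2 MiB of reference counts.
static_assert(RefCountSize(4ULL * 1024 * 1024 * 1024) == 2ULL * 1024 * 1024);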
@@ -112,8 +112,8 @@ private:
         Impl() = default;
         ~Impl() = default;
 
-        size_t Initialize(Core::System& system, PAddr address, size_t size, VAddr management,
-                          VAddr management_end, Pool p);
+        size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,
+                          Pool p);
 
         VAddr AllocateBlock(s32 index, bool random) {
             return heap.AllocateBlock(index, random);
@@ -241,7 +241,7 @@ static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
 static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
 
 // UNUSED: .DeriveSparse(2, 2, 0);
-constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug =
+constexpr auto KMemoryRegionType_VirtualDramUnknownDebug =
     KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
 static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
@@ -71,7 +71,7 @@ struct KernelCore::Impl {
         // Derive the initial memory layout from the emulated board
         Init::InitializeSlabResourceCounts(kernel);
         DeriveInitialMemoryLayout();
-        Init::InitializeSlabHeaps(system, memory_layout);
+        Init::InitializeSlabHeaps(system, *memory_layout);
 
         // Initialize kernel memory and resources.
         InitializeSystemResourceLimit(kernel, system.CoreTiming());
@@ -223,7 +223,7 @@ struct KernelCore::Impl {
         system_resource_limit = KResourceLimit::Create(system.Kernel());
         system_resource_limit->Initialize(&core_timing);
 
-        const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes();
+        const auto [total_size, kernel_size] = memory_layout->GetTotalAndKernelMemorySizes();
 
         // If setting the default system values fails, then something seriously wrong has occurred.
         ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size)
@@ -353,15 +353,17 @@ struct KernelCore::Impl {
     }
 
     void DeriveInitialMemoryLayout() {
+        memory_layout = std::make_unique<KMemoryLayout>();
+
         // Insert the root region for the virtual memory tree, from which all other regions will
         // derive.
-        memory_layout.GetVirtualMemoryRegionTree().InsertDirectly(
+        memory_layout->GetVirtualMemoryRegionTree().InsertDirectly(
             KernelVirtualAddressSpaceBase,
             KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1);
 
         // Insert the root region for the physical memory tree, from which all other regions will
         // derive.
-        memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly(
+        memory_layout->GetPhysicalMemoryRegionTree().InsertDirectly(
             KernelPhysicalAddressSpaceBase,
             KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);
 
@@ -378,7 +380,7 @@ struct KernelCore::Impl {
         if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
             kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start;
         }
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel));
 
         // Setup the code region.
@@ -387,11 +389,11 @@ struct KernelCore::Impl {
             Common::AlignDown(code_start_virt_addr, CodeRegionAlign);
         constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign);
         constexpr size_t code_region_size = code_region_end - code_region_start;
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             code_region_start, code_region_size, KMemoryRegionType_KernelCode));
 
         // Setup board-specific device physical regions.
-        Init::SetupDevicePhysicalMemoryRegions(memory_layout);
+        Init::SetupDevicePhysicalMemoryRegions(*memory_layout);
 
         // Determine the amount of space needed for the misc region.
         size_t misc_region_needed_size;
@@ -400,7 +402,7 @@ struct KernelCore::Impl {
         misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize));
 
         // Account for each auto-map device.
-        for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+        for (const auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
             if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
                 // Check that the region is valid.
                 ASSERT(region.GetEndAddress() != 0);
@@ -425,22 +427,22 @@ struct KernelCore::Impl {
 
         // Setup the misc region.
         const VAddr misc_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                 misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));
 
         // Setup the stack region.
         constexpr size_t StackRegionSize = 14_MiB;
         constexpr size_t StackRegionAlign = KernelAslrAlignment;
         const VAddr stack_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                 StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));
 
         // Determine the size of the resource region.
-        const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit();
+        const size_t resource_region_size = memory_layout->GetResourceRegionSizeForInit();
 
         // Determine the size of the slab region.
         const size_t slab_region_size =
@@ -457,23 +459,23 @@ struct KernelCore::Impl {
             Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) -
             Common::AlignDown(code_end_phys_addr, SlabRegionAlign);
         const VAddr slab_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                 slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) +
             (code_end_phys_addr % SlabRegionAlign);
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab));
 
         // Setup the temp region.
         constexpr size_t TempRegionSize = 128_MiB;
         constexpr size_t TempRegionAlign = KernelAslrAlignment;
         const VAddr temp_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                 TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
-                                                                 KMemoryRegionType_KernelTemp));
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
+                                                                  KMemoryRegionType_KernelTemp));
 
         // Automatically map in devices that have auto-map attributes.
-        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+        for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
             // We only care about kernel regions.
             if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) {
                 continue;
@@ -500,21 +502,21 @@ struct KernelCore::Impl {
             const size_t map_size =
                 Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr;
             const VAddr map_virt_addr =
-                memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
+                memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
                     map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
-            ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
                 map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice));
             region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr);
         }
 
-        Init::SetupDramPhysicalMemoryRegions(memory_layout);
+        Init::SetupDramPhysicalMemoryRegions(*memory_layout);
 
         // Insert a physical region for the kernel code region.
-        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
             code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode));
 
         // Insert a physical region for the kernel slab region.
-        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
             slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab));
 
         // Determine size available for kernel page table heaps, requiring > 8 MB.
@@ -523,12 +525,12 @@ struct KernelCore::Impl {
         ASSERT(page_table_heap_size / 4_MiB > 2);
 
         // Insert a physical region for the kernel page table heap region
-        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
             slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));
 
         // All DRAM regions that we haven't tagged by this point will be mapped under the linear
         // mapping. Tag them.
-        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+        for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
             if (region.GetType() == KMemoryRegionType_Dram) {
                 // Check that the region is valid.
                 ASSERT(region.GetEndAddress() != 0);
@@ -540,7 +542,7 @@ struct KernelCore::Impl {
 
         // Get the linear region extents.
         const auto linear_extents =
-            memory_layout.GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            memory_layout->GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
                 KMemoryRegionAttr_LinearMapped);
         ASSERT(linear_extents.GetEndAddress() != 0);
 
@@ -552,7 +554,7 @@ struct KernelCore::Impl {
             Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) -
             aligned_linear_phys_start;
         const VAddr linear_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
                 linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);
 
         const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start;
@@ -561,7 +563,7 @@ struct KernelCore::Impl {
         {
             PAddr cur_phys_addr = 0;
             u64 cur_size = 0;
-            for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+            for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
                 if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
                     continue;
                 }
@@ -580,47 +582,47 @@ struct KernelCore::Impl {
 
                 const VAddr region_virt_addr =
                     region.GetAddress() + linear_region_phys_to_virt_diff;
-                ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+                ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
                     region_virt_addr, region.GetSize(),
                     GetTypeForVirtualLinearMapping(region.GetType())));
                 region.SetPairAddress(region_virt_addr);
 
                 KMemoryRegion* virt_region =
-                    memory_layout.GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
+                    memory_layout->GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
                 ASSERT(virt_region != nullptr);
                 virt_region->SetPairAddress(region.GetAddress());
             }
         }
 
         // Insert regions for the initial page table region.
-        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
             resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt));
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize,
             KMemoryRegionType_VirtualDramKernelInitPt));
 
         // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to
         // some pool partition. Tag them.
-        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+        for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
             if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) {
                 region.SetType(KMemoryRegionType_DramPoolPartition);
             }
         }
 
         // Setup all other memory regions needed to arrange the pool partitions.
-        Init::SetupPoolPartitionMemoryRegions(memory_layout);
+        Init::SetupPoolPartitionMemoryRegions(*memory_layout);
 
         // Cache all linear regions in their own trees for faster access, later.
-        memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start,
-                                                        linear_region_start);
+        memory_layout->InitializeLinearMemoryRegionTrees(aligned_linear_phys_start,
+                                                         linear_region_start);
     }
 
     void InitializeMemoryLayout() {
-        const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents();
+        const auto system_pool = memory_layout->GetKernelSystemPoolRegionPhysicalExtents();
 
         // Initialize the memory manager.
         memory_manager = std::make_unique<KMemoryManager>(system);
-        const auto& management_region = memory_layout.GetPoolManagementRegion();
+        const auto& management_region = memory_layout->GetPoolManagementRegion();
         ASSERT(management_region.GetEndAddress() != 0);
         memory_manager->Initialize(management_region.GetAddress(), management_region.GetSize());
@@ -773,7 +775,7 @@ struct KernelCore::Impl {
     Kernel::KSharedMemory* hidbus_shared_mem{};
 
     // Memory layout
-    KMemoryLayout memory_layout;
+    std::unique_ptr<KMemoryLayout> memory_layout;
 
     // Threads used for services
     std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
@@ -1149,7 +1151,7 @@ const KWorkerTaskManager& KernelCore::WorkerTaskManager() const {
 }
 
 const KMemoryLayout& KernelCore::MemoryLayout() const {
-    return impl->memory_layout;
+    return *impl->memory_layout;
 }
 
 bool KernelCore::IsPhantomModeForSingleCore() const {
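These two hunks show the shape of the whole kernel change: KMemoryLayout, a large object holding the region trees, moves from an inline member to the heap, and every `memory_layout.` use becomes `memory_layout->`. The general pattern, sketched with hypothetical types and an illustrative size (a plausible motive is keeping KernelCore::Impl small and deferring construction until DeriveInitialMemoryLayout() runs):

#include <memory>

struct BigLayout {
    char tables[1 << 20]; // stands in for the large region trees
};

struct Impl {
    std::unique_ptr<BigLayout> layout; // was: BigLayout layout;

    void Derive() {
        layout = std::make_unique<BigLayout>(); // construct on first use
    }

    const BigLayout& Layout() const {
        return *layout; // accessor dereferences, as MemoryLayout() now does
    }
};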
@@ -632,6 +632,7 @@ void Config::ReadCpuValues() {
     ReadGlobalSetting(Settings::values.cpuopt_unsafe_ignore_standard_fpcr);
     ReadGlobalSetting(Settings::values.cpuopt_unsafe_inaccurate_nan);
     ReadGlobalSetting(Settings::values.cpuopt_unsafe_fastmem_check);
+    ReadGlobalSetting(Settings::values.cpuopt_unsafe_ignore_global_monitor);
 
     if (global) {
         ReadBasicSetting(Settings::values.cpu_debug_mode);
@@ -644,6 +645,8 @@ void Config::ReadCpuValues() {
         ReadBasicSetting(Settings::values.cpuopt_misc_ir);
         ReadBasicSetting(Settings::values.cpuopt_reduce_misalign_checks);
         ReadBasicSetting(Settings::values.cpuopt_fastmem);
+        ReadBasicSetting(Settings::values.cpuopt_fastmem_exclusives);
+        ReadBasicSetting(Settings::values.cpuopt_recompile_exclusives);
     }
 
     qt_config->endGroup();
@@ -1173,6 +1176,7 @@ void Config::SaveCpuValues() {
     WriteGlobalSetting(Settings::values.cpuopt_unsafe_ignore_standard_fpcr);
     WriteGlobalSetting(Settings::values.cpuopt_unsafe_inaccurate_nan);
     WriteGlobalSetting(Settings::values.cpuopt_unsafe_fastmem_check);
+    WriteGlobalSetting(Settings::values.cpuopt_unsafe_ignore_global_monitor);
 
     if (global) {
         WriteBasicSetting(Settings::values.cpu_debug_mode);
@@ -36,6 +36,7 @@ void ConfigureCpu::SetConfiguration() {
     ui->cpuopt_unsafe_ignore_standard_fpcr->setEnabled(runtime_lock);
     ui->cpuopt_unsafe_inaccurate_nan->setEnabled(runtime_lock);
     ui->cpuopt_unsafe_fastmem_check->setEnabled(runtime_lock);
+    ui->cpuopt_unsafe_ignore_global_monitor->setEnabled(runtime_lock);
 
     ui->cpuopt_unsafe_unfuse_fma->setChecked(Settings::values.cpuopt_unsafe_unfuse_fma.GetValue());
     ui->cpuopt_unsafe_reduce_fp_error->setChecked(
@@ -46,6 +47,8 @@ void ConfigureCpu::SetConfiguration() {
         Settings::values.cpuopt_unsafe_inaccurate_nan.GetValue());
     ui->cpuopt_unsafe_fastmem_check->setChecked(
         Settings::values.cpuopt_unsafe_fastmem_check.GetValue());
+    ui->cpuopt_unsafe_ignore_global_monitor->setChecked(
+        Settings::values.cpuopt_unsafe_ignore_global_monitor.GetValue());
 
     if (Settings::IsConfiguringGlobal()) {
         ui->accuracy->setCurrentIndex(static_cast<int>(Settings::values.cpu_accuracy.GetValue()));
@@ -82,6 +85,9 @@ void ConfigureCpu::ApplyConfiguration() {
     ConfigurationShared::ApplyPerGameSetting(&Settings::values.cpuopt_unsafe_fastmem_check,
                                              ui->cpuopt_unsafe_fastmem_check,
                                              cpuopt_unsafe_fastmem_check);
+    ConfigurationShared::ApplyPerGameSetting(&Settings::values.cpuopt_unsafe_ignore_global_monitor,
+                                             ui->cpuopt_unsafe_ignore_global_monitor,
+                                             cpuopt_unsafe_ignore_global_monitor);
 }
 
 void ConfigureCpu::changeEvent(QEvent* event) {
@@ -120,4 +126,7 @@ void ConfigureCpu::SetupPerGameUI() {
     ConfigurationShared::SetColoredTristate(ui->cpuopt_unsafe_fastmem_check,
                                             Settings::values.cpuopt_unsafe_fastmem_check,
                                             cpuopt_unsafe_fastmem_check);
+    ConfigurationShared::SetColoredTristate(ui->cpuopt_unsafe_ignore_global_monitor,
+                                            Settings::values.cpuopt_unsafe_ignore_global_monitor,
+                                            cpuopt_unsafe_ignore_global_monitor);
 }
@@ -45,6 +45,7 @@ private:
     ConfigurationShared::CheckState cpuopt_unsafe_ignore_standard_fpcr;
     ConfigurationShared::CheckState cpuopt_unsafe_inaccurate_nan;
     ConfigurationShared::CheckState cpuopt_unsafe_fastmem_check;
+    ConfigurationShared::CheckState cpuopt_unsafe_ignore_global_monitor;
 
     const Core::System& system;
 };
@@ -150,6 +150,18 @@
         </property>
        </widget>
       </item>
+      <item>
+       <widget class="QCheckBox" name="cpuopt_unsafe_ignore_global_monitor">
+        <property name="toolTip">
+         <string>
+            <div>This option improves speed by relying only on the semantics of cmpxchg to ensure safety of exclusive access instructions. Please note this may result in deadlocks and other race conditions.</div>
+         </string>
+        </property>
+        <property name="text">
+         <string>Ignore global monitor</string>
+        </property>
+       </widget>
+      </item>
      </layout>
     </widget>
    </item>
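The tooltip's "relying only on the semantics of cmpxchg" can be made concrete: with the global monitor ignored, a guest load-exclusive/store-exclusive pair degenerates to a plain load plus a host compare-exchange. A sketch with hypothetical helper names (the real lowering happens inside Dynarmic's JIT); the ABA window noted below is one reason the tooltip warns about races:

#include <atomic>
#include <cstdint>

std::uint32_t expected_value; // value observed by the guest's load-exclusive

std::uint32_t EmulateLoadExclusive(std::atomic<std::uint32_t>& mem) {
    expected_value = mem.load(std::memory_order_acquire);
    return expected_value;
}

// Returns 0 on success, 1 on failure (the ARM STREX convention). The store
// succeeds whenever the word still holds the loaded value -- weaker than a
// true monitor, since a change-and-change-back (ABA) goes unnoticed.
std::uint32_t EmulateStoreExclusive(std::atomic<std::uint32_t>& mem,
                                    std::uint32_t desired) {
    std::uint32_t expected = expected_value;
    return mem.compare_exchange_strong(expected, desired,
                                       std::memory_order_acq_rel) ? 0 : 1;
}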
@@ -44,6 +44,12 @@ void ConfigureCpuDebug::SetConfiguration() {
         Settings::values.cpuopt_reduce_misalign_checks.GetValue());
     ui->cpuopt_fastmem->setEnabled(runtime_lock);
     ui->cpuopt_fastmem->setChecked(Settings::values.cpuopt_fastmem.GetValue());
+    ui->cpuopt_fastmem_exclusives->setEnabled(runtime_lock);
+    ui->cpuopt_fastmem_exclusives->setChecked(
+        Settings::values.cpuopt_fastmem_exclusives.GetValue());
+    ui->cpuopt_recompile_exclusives->setEnabled(runtime_lock);
+    ui->cpuopt_recompile_exclusives->setChecked(
+        Settings::values.cpuopt_recompile_exclusives.GetValue());
 }
 
 void ConfigureCpuDebug::ApplyConfiguration() {
@@ -56,6 +62,8 @@ void ConfigureCpuDebug::ApplyConfiguration() {
     Settings::values.cpuopt_misc_ir = ui->cpuopt_misc_ir->isChecked();
     Settings::values.cpuopt_reduce_misalign_checks = ui->cpuopt_reduce_misalign_checks->isChecked();
     Settings::values.cpuopt_fastmem = ui->cpuopt_fastmem->isChecked();
+    Settings::values.cpuopt_fastmem_exclusives = ui->cpuopt_fastmem_exclusives->isChecked();
+    Settings::values.cpuopt_recompile_exclusives = ui->cpuopt_recompile_exclusives->isChecked();
 }
 
 void ConfigureCpuDebug::changeEvent(QEvent* event) {
@@ -144,7 +144,34 @@
          </string>
         </property>
         <property name="text">
-         <string>Enable Host MMU Emulation</string>
+         <string>Enable Host MMU Emulation (general memory instructions)</string>
         </property>
        </widget>
       </item>
+      <item>
+       <widget class="QCheckBox" name="cpuopt_fastmem_exclusives">
+        <property name="toolTip">
+         <string>
+            <div style="white-space: nowrap">This optimization speeds up exclusive memory accesses by the guest program.</div>
+            <div style="white-space: nowrap">Enabling it causes guest exclusive memory reads/writes to be done directly into memory and make use of Host's MMU.</div>
+            <div style="white-space: nowrap">Disabling this forces all exclusive memory accesses to use Software MMU Emulation.</div>
+         </string>
+        </property>
+        <property name="text">
+         <string>Enable Host MMU Emulation (exclusive memory instructions)</string>
+        </property>
+       </widget>
+      </item>
+      <item>
+       <widget class="QCheckBox" name="cpuopt_recompile_exclusives">
+        <property name="toolTip">
+         <string>
+            <div style="white-space: nowrap">This optimization speeds up exclusive memory accesses by the guest program.</div>
+            <div style="white-space: nowrap">Enabling it reduces the overhead of fastmem failure of exclusive memory accesses.</div>
+         </string>
+        </property>
+        <property name="text">
+         <string>Enable recompilation of exclusive memory instructions</string>
+        </property>
+       </widget>
+      </item>
@@ -280,11 +280,14 @@ void Config::ReadValues() {
     ReadSetting("Cpu", Settings::values.cpuopt_misc_ir);
     ReadSetting("Cpu", Settings::values.cpuopt_reduce_misalign_checks);
     ReadSetting("Cpu", Settings::values.cpuopt_fastmem);
+    ReadSetting("Cpu", Settings::values.cpuopt_fastmem_exclusives);
+    ReadSetting("Cpu", Settings::values.cpuopt_recompile_exclusives);
     ReadSetting("Cpu", Settings::values.cpuopt_unsafe_unfuse_fma);
     ReadSetting("Cpu", Settings::values.cpuopt_unsafe_reduce_fp_error);
     ReadSetting("Cpu", Settings::values.cpuopt_unsafe_ignore_standard_fpcr);
     ReadSetting("Cpu", Settings::values.cpuopt_unsafe_inaccurate_nan);
     ReadSetting("Cpu", Settings::values.cpuopt_unsafe_fastmem_check);
+    ReadSetting("Cpu", Settings::values.cpuopt_unsafe_ignore_global_monitor);
 
     // Renderer
     ReadSetting("Renderer", Settings::values.renderer_backend);
@@ -174,6 +174,14 @@ cpuopt_reduce_misalign_checks =
 # 0: Disabled, 1 (default): Enabled
 cpuopt_fastmem =
 
+# Enable Host MMU Emulation for exclusive memory instructions (faster guest memory access)
+# 0: Disabled, 1 (default): Enabled
+cpuopt_fastmem_exclusives =
+
+# Enable fallback on failure of fastmem of exclusive memory instructions (faster guest memory access)
+# 0: Disabled, 1 (default): Enabled
+cpuopt_recompile_exclusives =
+
 # Enable unfuse FMA (improve performance on CPUs without FMA)
 # Only enabled if cpu_accuracy is set to Unsafe. Automatically chosen with cpu_accuracy = Auto-select.
 # 0: Disabled, 1 (default): Enabled
@@ -199,6 +207,11 @@ cpuopt_unsafe_inaccurate_nan =
 # 0: Disabled, 1 (default): Enabled
 cpuopt_unsafe_fastmem_check =
 
+# Enable faster exclusive instructions
+# Only enabled if cpu_accuracy is set to Unsafe. Automatically chosen with cpu_accuracy = Auto-select.
+# 0: Disabled, 1 (default): Enabled
+cpuopt_unsafe_ignore_global_monitor =
+
 [Renderer]
 # Which backend API to use.
 # 0 (default): OpenGL, 1: Vulkan
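For reference, a [Cpu] section exercising everything this commit adds would look like the following; the values shown are the documented defaults, so leaving the keys blank behaves the same:

[Cpu]
cpuopt_fastmem_exclusives = 1
cpuopt_recompile_exclusives = 1
cpuopt_unsafe_ignore_global_monitor = 1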