From f11e1d29defa6389862a3740815aec2cfea5589f Mon Sep 17 00:00:00 2001
From: pineappleEA
Date: Fri, 21 May 2021 06:08:57 +0200
Subject: [PATCH] early-access version 1700

---
 README.md                                    |   2 +-
 src/core/hle/kernel/init/init_slab_setup.cpp |  10 +-
 src/core/hle/kernel/k_port.cpp               |   2 +-
 src/core/hle/kernel/k_slab_heap.h            | 154 ++-----------------
 src/core/hle/kernel/slab_helpers.h           |   4 +-
 src/core/memory.cpp                          |  21 ---
 src/core/memory.h                            |   9 --
 src/yuzu/configuration/configure_cpu.cpp     |  11 +-
 8 files changed, 28 insertions(+), 185 deletions(-)

diff --git a/README.md b/README.md
index ca22a3c72..9fdc836e3 100755
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 1699.
+This is the source code for early-access 1700.
 
 ## Legal Notice
 
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 69ae405e6..10edede17 100755
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -70,14 +70,22 @@ constexpr size_t SlabCountExtraKThread = 160;
 template <typename T>
 VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
                          size_t num_objects) {
+    // TODO(bunnei): This is just a placeholder. We should initialize the appropriate KSlabHeap for
+    // kernel object type T with the backing kernel memory pointer once we emulate kernel memory.
+
     const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
     VAddr start = Common::AlignUp(address, alignof(T));
 
+    // This is intentionally a null pointer. Once KSlabHeap is fully implemented, we can replace it
+    // with the pointer to emulated memory to pass along. Until then, KSlabHeap will just
+    // allocate/free host memory.
+    void* backing_kernel_memory{};
+
     if (size > 0) {
         const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
         ASSERT(region != nullptr);
         ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab));
-        T::InitializeSlabHeap(system.Kernel(), system.Memory().GetKernelBuffer(start, size), size);
+        T::InitializeSlabHeap(system.Kernel(), backing_kernel_memory, size);
     }
 
     return start + size;
diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp
index 723a1b571..223c0b205 100755
--- a/src/core/hle/kernel/k_port.cpp
+++ b/src/core/hle/kernel/k_port.cpp
@@ -56,8 +56,8 @@ ResultCode KPort::EnqueueSession(KServerSession* session) {
 
     R_UNLESS(state == State::Normal, ResultPortClosed);
 
-    server.GetSessionRequestHandler()->ClientConnected(session);
     server.EnqueueSession(session);
+    server.GetSessionRequestHandler()->ClientConnected(server.AcceptSession());
 
     return RESULT_SUCCESS;
 }
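Note: in the init_slab_setup.cpp hunk above, InitializeSlabHeap<T> acts as a bump cursor over
the kernel slab region: it aligns the running address up to alignof(T), reserves
sizeof(T) * num_objects bytes rounded up to pointer alignment, and returns the address one past
the reservation so the next slab starts there. A minimal standalone sketch of that arithmetic,
assuming AlignUp rounds up to a power-of-two boundary; the object sizes, counts, and region base
below are made up for illustration:

    #include <cstdint>
    #include <cstdio>

    // Stand-in for Common::AlignUp; align must be a power of two.
    constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
        return (value + align - 1) & ~(align - 1);
    }

    int main() {
        std::uint64_t address = 0x80000004; // hypothetical unaligned cursor
        const struct {
            const char* name;
            std::uint64_t obj_size, obj_align, count;
        } slabs[] = {
            {"KThread", 0x400, 0x10, 450}, // sizes/counts are illustrative only
            {"KEvent", 0x40, 0x8, 700},
        };
        for (const auto& s : slabs) {
            const std::uint64_t start = AlignUp(address, s.obj_align);
            const std::uint64_t size = AlignUp(s.obj_size * s.count, alignof(void*));
            std::printf("%s slab: [%#llx, %#llx)\n", s.name,
                        static_cast<unsigned long long>(start),
                        static_cast<unsigned long long>(start + size));
            address = start + size; // next slab starts where this one ends
        }
        return 0;
    }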
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index 5ce9a1d7c..81d472a3e 100755
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -4,165 +4,33 @@
 
 #pragma once
 
-#include <atomic>
-
-#include "common/assert.h"
-#include "common/common_types.h"
-
 namespace Kernel {
 
-namespace impl {
+class KernelCore;
 
-class KSlabHeapImpl final : NonCopyable {
-public:
-    struct Node {
-        Node* next{};
-    };
-
-    constexpr KSlabHeapImpl() = default;
-
-    void Initialize(std::size_t size) {
-        ASSERT(head == nullptr);
-        obj_size = size;
-    }
-
-    constexpr std::size_t GetObjectSize() const {
-        return obj_size;
-    }
-
-    Node* GetHead() const {
-        return head;
-    }
-
-    void* Allocate() {
-        Node* ret = head.load();
-
-        do {
-            if (ret == nullptr) {
-                break;
-            }
-        } while (!head.compare_exchange_weak(ret, ret->next));
-
-        return ret;
-    }
-
-    void Free(void* obj) {
-        Node* node = static_cast<Node*>(obj);
-
-        Node* cur_head = head.load();
-        do {
-            node->next = cur_head;
-        } while (!head.compare_exchange_weak(cur_head, node));
-    }
-
-private:
-    std::atomic<Node*> head{};
-    std::size_t obj_size{};
-};
-
-} // namespace impl
-
-class KSlabHeapBase : NonCopyable {
-public:
-    constexpr KSlabHeapBase() = default;
-
-    constexpr bool Contains(uintptr_t addr) const {
-        return start <= addr && addr < end;
-    }
-
-    constexpr std::size_t GetSlabHeapSize() const {
-        return (end - start) / GetObjectSize();
-    }
-
-    constexpr std::size_t GetObjectSize() const {
-        return impl.GetObjectSize();
-    }
-
-    constexpr uintptr_t GetSlabHeapAddress() const {
-        return start;
-    }
-
-    std::size_t GetObjectIndexImpl(const void* obj) const {
-        return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
-    }
-
-    std::size_t GetPeakIndex() const {
-        return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
-    }
-
-    void* AllocateImpl() {
-        return impl.Allocate();
-    }
-
-    void FreeImpl(void* obj) {
-        // Don't allow freeing an object that wasn't allocated from this heap
-        ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
-
-        impl.Free(obj);
-    }
-
-    void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
-        // Ensure we don't initialize a slab using null memory
-        ASSERT(memory != nullptr);
-
-        // Initialize the base allocator
-        impl.Initialize(obj_size);
-
-        // Set our tracking variables
-        const std::size_t num_obj = (memory_size / obj_size);
-        start = reinterpret_cast<uintptr_t>(memory);
-        end = start + num_obj * obj_size;
-        peak = start;
-
-        // Free the objects
-        u8* cur = reinterpret_cast<u8*>(end);
-
-        for (std::size_t i{}; i < num_obj; i++) {
-            cur -= obj_size;
-            impl.Free(cur);
-        }
-    }
-
-private:
-    using Impl = impl::KSlabHeapImpl;
-
-    Impl impl;
-    uintptr_t peak{};
-    uintptr_t start{};
-    uintptr_t end{};
-};
-
+/// This is a placeholder class to manage slab heaps for kernel objects. For now, we just allocate
+/// these with new/delete, but this can be re-implemented later to allocate these in emulated
+/// memory.
 template <typename T>
-class KSlabHeap final : public KSlabHeapBase {
+class KSlabHeap final : NonCopyable {
 public:
-    constexpr KSlabHeap() : KSlabHeapBase() {}
+    KSlabHeap() = default;
 
-    void Initialize(void* memory, std::size_t memory_size) {
-        InitializeImpl(sizeof(T), memory, memory_size);
+    void Initialize([[maybe_unused]] void* memory, [[maybe_unused]] std::size_t memory_size) {
+        // Placeholder that should initialize the backing slab heap implementation.
     }
 
     T* Allocate() {
-        T* obj = static_cast<T*>(AllocateImpl());
-        if (obj != nullptr) {
-            new (obj) T();
-        }
-        return obj;
+        return new T();
     }
 
     T* AllocateWithKernel(KernelCore& kernel) {
-        T* obj = static_cast<T*>(AllocateImpl());
-        if (obj != nullptr) {
-            new (obj) T(kernel);
-        }
-        return obj;
+        return new T(kernel);
     }
 
     void Free(T* obj) {
-        FreeImpl(obj);
-    }
-
-    constexpr std::size_t GetObjectIndex(const T* obj) const {
-        return GetObjectIndexImpl(obj);
+        delete obj;
     }
 };
 
 } // namespace Kernel
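Note: the KSlabHeapImpl deleted above is a lock-free intrusive free list (essentially a Treiber
stack): Allocate pops the head with a compare-and-swap loop, Free pushes a node back the same
way, and InitializeImpl seeded the list by walking the backing block backwards and freeing every
slot. A compact standalone sketch of that pattern under the same assumptions as the original
(no extra ABA protection; the block size below is made up):

    #include <atomic>
    #include <cassert>
    #include <cstddef>

    // Each free slot stores a pointer to the next free slot inside itself.
    struct Node {
        Node* next{};
    };

    class FreeList {
    public:
        void* Allocate() {
            Node* ret = head.load();
            do {
                if (ret == nullptr) {
                    break; // exhausted: hand back nullptr
                }
                // On CAS failure, ret is reloaded with the current head and we retry.
            } while (!head.compare_exchange_weak(ret, ret->next));
            return ret;
        }

        void Free(void* obj) {
            Node* node = static_cast<Node*>(obj);
            Node* cur_head = head.load();
            do {
                node->next = cur_head; // link the node in front of the current head
            } while (!head.compare_exchange_weak(cur_head, node));
        }

    private:
        std::atomic<Node*> head{};
    };

    int main() {
        constexpr std::size_t obj_size = 64;
        constexpr std::size_t num_obj = 8;
        alignas(alignof(Node)) static unsigned char block[obj_size * num_obj];

        // Seed the list back-to-front, as the deleted InitializeImpl did.
        FreeList list;
        unsigned char* cur = block + sizeof(block);
        for (std::size_t i = 0; i < num_obj; ++i) {
            cur -= obj_size;
            list.Free(cur);
        }

        void* first = list.Allocate();
        assert(first == block); // the last slot freed (lowest address) pops first
        return 0;
    }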
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h
index d0f7f084b..0c5995db0 100755
--- a/src/core/hle/kernel/slab_helpers.h
+++ b/src/core/hle/kernel/slab_helpers.h
@@ -67,11 +67,11 @@ class KAutoObjectWithSlabHeapAndContainer : public Base {
 
 private:
     static Derived* Allocate(KernelCore& kernel) {
-        return new Derived(kernel);
+        return kernel.SlabHeap<Derived>().AllocateWithKernel(kernel);
     }
 
     static void Free(KernelCore& kernel, Derived* obj) {
-        delete obj;
+        kernel.SlabHeap<Derived>().Free(obj);
     }
 
 public:
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index b4c56e1c1..bf2ef7816 100755
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -82,22 +82,6 @@ struct Memory::Impl {
         return nullptr;
     }
 
-    u8* GetKernelBuffer(VAddr start_vaddr, size_t size) {
-        // TODO(bunnei): This is just a workaround until we have kernel memory layout mapped &
-        // managed. Until then, we use this to allocate and access kernel memory regions.
-
-        auto search = kernel_memory_regions.find(start_vaddr);
-        if (search != kernel_memory_regions.end()) {
-            return search->second.get();
-        }
-
-        std::unique_ptr<u8[]> new_memory_region{new u8[size]};
-        u8* raw_ptr = new_memory_region.get();
-        kernel_memory_regions[start_vaddr] = std::move(new_memory_region);
-
-        return raw_ptr;
-    }
-
     u8 Read8(const VAddr addr) {
         return Read<u8>(addr);
     }
@@ -727,7 +711,6 @@ struct Memory::Impl {
     }
 
     Common::PageTable* current_page_table = nullptr;
-    std::unordered_map<VAddr, std::unique_ptr<u8[]>> kernel_memory_regions;
     Core::System& system;
 };
 
@@ -765,10 +748,6 @@ u8* Memory::GetPointer(VAddr vaddr) {
     return impl->GetPointer(vaddr);
 }
 
-u8* Memory::GetKernelBuffer(VAddr start_vaddr, size_t size) {
-    return impl->GetKernelBuffer(start_vaddr, size);
-}
-
 const u8* Memory::GetPointer(VAddr vaddr) const {
     return impl->GetPointer(vaddr);
 }
diff --git a/src/core/memory.h b/src/core/memory.h
index 345fd870d..c91eeced9 100755
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -121,15 +121,6 @@ public:
      */
     u8* GetPointer(VAddr vaddr);
 
-    /**
-     * Gets a pointer to the start of a kernel heap allocated memory region. Will allocate one if
-     * it does not already exist.
-     *
-     * @param start_vaddr Start virtual address for the memory region.
-     * @param size Size of the memory region.
-     */
-    u8* GetKernelBuffer(VAddr start_vaddr, size_t size);
-
     template <typename T>
     T* GetPointer(VAddr vaddr) {
         return reinterpret_cast<T*>(GetPointer(vaddr));
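Note: the slab_helpers.h hunk above assumes KernelCore exposes a typed accessor,
kernel.SlabHeap<Derived>(). That accessor's definition is not part of this patch, so the shape
below is an assumption for illustration only: tuple-based per-type storage and stand-in object
types, not the real KernelCore.

    #include <tuple>

    // Hypothetical stand-ins for the real kernel object types.
    struct KThread {};
    struct KProcess {};

    // Matches the placeholder KSlabHeap in k_slab_heap.h: plain new/delete for now.
    template <typename T>
    class KSlabHeap {
    public:
        T* Allocate() {
            return new T();
        }
        void Free(T* obj) {
            delete obj;
        }
    };

    class KernelCore {
    public:
        // One heap per object type; std::get selects the member by unique type.
        template <typename T>
        KSlabHeap<T>& SlabHeap() {
            return std::get<KSlabHeap<T>>(slab_heaps);
        }

    private:
        std::tuple<KSlabHeap<KThread>, KSlabHeap<KProcess>> slab_heaps;
    };

    int main() {
        KernelCore kernel;
        KThread* thread = kernel.SlabHeap<KThread>().Allocate();
        kernel.SlabHeap<KThread>().Free(thread);
        return 0;
    }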
diff --git a/src/yuzu/configuration/configure_cpu.cpp b/src/yuzu/configuration/configure_cpu.cpp
index 901e54e0d..525c42ff0 100755
--- a/src/yuzu/configuration/configure_cpu.cpp
+++ b/src/yuzu/configuration/configure_cpu.cpp
@@ -67,14 +67,11 @@ void ConfigureCpu::AccuracyUpdated(int index) {
 }
 
 void ConfigureCpu::UpdateGroup(int index) {
-    if (Settings::IsConfiguringGlobal()) {
-        ui->unsafe_group->setVisible(static_cast<Settings::CPUAccuracy>(index) ==
-                                     Settings::CPUAccuracy::Unsafe);
-    } else {
-        ui->unsafe_group->setVisible(
-            static_cast<Settings::CPUAccuracy>(index - ConfigurationShared::USE_GLOBAL_OFFSET) ==
-            Settings::CPUAccuracy::Unsafe);
+    if (!Settings::IsConfiguringGlobal()) {
+        index -= ConfigurationShared::USE_GLOBAL_OFFSET;
     }
+    const auto accuracy = static_cast<Settings::CPUAccuracy>(index);
+    ui->unsafe_group->setVisible(accuracy == Settings::CPUAccuracy::Unsafe);
 }
 
 void ConfigureCpu::ApplyConfiguration() {
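Note: the configure_cpu.cpp hunk folds the duplicated branches into one by normalizing the
combo-box index first: in per-game configuration the list carries extra leading rows (the
"Use global setting" entry), so the index is shifted by USE_GLOBAL_OFFSET before being cast to
the accuracy enum. A standalone sketch of that normalization; the Qt widget is stubbed out,
and both the enum values and USE_GLOBAL_OFFSET == 1 are assumptions for illustration, not the
values from yuzu's headers:

    #include <cstdio>

    enum class CPUAccuracy { Accurate = 0, Unsafe = 1, DebugMode = 2 }; // illustrative values
    constexpr int USE_GLOBAL_OFFSET = 1; // hypothetical: one extra leading combo-box row

    bool ShouldShowUnsafeGroup(int index, bool configuring_global) {
        if (!configuring_global) {
            index -= USE_GLOBAL_OFFSET; // strip the "Use global setting" row
        }
        return static_cast<CPUAccuracy>(index) == CPUAccuracy::Unsafe;
    }

    int main() {
        // Global config: index 1 maps directly to Unsafe.
        std::printf("%d\n", ShouldShowUnsafeGroup(1, true));  // prints 1
        // Per-game config: index 2 sits one past the "Use global" row, so it is Unsafe too.
        std::printf("%d\n", ShouldShowUnsafeGroup(2, false)); // prints 1
        return 0;
    }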