diff --git a/README.md b/README.md
index 80c82d93c..d9e611a67 100755
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============

-This is the source code for early-access 1376.
+This is the source code for early-access 1377.

 ## Legal Notice

diff --git a/src/common/common_funcs.h b/src/common/common_funcs.h
index 842d62ca7..71d136d4a 100755
--- a/src/common/common_funcs.h
+++ b/src/common/common_funcs.h
@@ -97,6 +97,9 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
 #define R_UNLESS(expr, res)                                                                        \
     {                                                                                              \
         if (!(expr)) {                                                                             \
+            if (res.IsError()) {                                                                   \
+                LOG_CRITICAL(Kernel, "Failed with error {}", res.raw);                             \
+            }                                                                                      \
             return res;                                                                            \
         }                                                                                          \
     }
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 7d7e191ea..6c4c8e9e4 100755
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -251,16 +251,10 @@ void ARM_Dynarmic_32::SetTPIDR_EL0(u64 value) {
 }

 void ARM_Dynarmic_32::ChangeProcessorID(std::size_t new_core_id) {
-    if (!jit) {
-        return;
-    }
     jit->ChangeProcessorID(new_core_id);
 }

 void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
-    if (!jit) {
-        return;
-    }
     Dynarmic::A32::Context context;
     jit->SaveContext(context);
     ctx.cpu_registers = context.Regs();
@@ -270,9 +264,6 @@ void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
 }

 void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
-    if (!jit) {
-        return;
-    }
     Dynarmic::A32::Context context;
     context.Regs() = ctx.cpu_registers;
     context.ExtRegs() = ctx.extension_registers;
@@ -282,9 +273,6 @@ void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
 }

 void ARM_Dynarmic_32::PrepareReschedule() {
-    if (!jit) {
-        return;
-    }
     jit->HaltExecution();
 }

diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index f755a39cf..4c5ebca22 100755
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -290,16 +290,10 @@ void ARM_Dynarmic_64::SetTPIDR_EL0(u64 value) {
 }

 void ARM_Dynarmic_64::ChangeProcessorID(std::size_t new_core_id) {
-    if (!jit) {
-        return;
-    }
     jit->ChangeProcessorID(new_core_id);
 }

 void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
-    if (!jit) {
-        return;
-    }
     ctx.cpu_registers = jit->GetRegisters();
     ctx.sp = jit->GetSP();
     ctx.pc = jit->GetPC();
@@ -311,9 +305,6 @@ void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
 }

 void ARM_Dynarmic_64::LoadContext(const ThreadContext64& ctx) {
-    if (!jit) {
-        return;
-    }
     jit->SetRegisters(ctx.cpu_registers);
     jit->SetSP(ctx.sp);
     jit->SetPC(ctx.pc);
@@ -325,9 +316,6 @@ void ARM_Dynarmic_64::LoadContext(const ThreadContext64& ctx) {
 }

 void ARM_Dynarmic_64::PrepareReschedule() {
-    if (!jit) {
-        return;
-    }
     jit->HaltExecution();
 }

diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 3b38f8f3e..169455d18 100755
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -23,11 +23,11 @@ public:
     explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}

     bool IsLockedByCurrentThread() const {
-        return this->owner_thread == GetCurrentThreadPointer(kernel);
+        return owner_thread == GetCurrentThreadPointer(kernel);
     }

     void Lock() {
-        if (this->IsLockedByCurrentThread()) {
+        if (IsLockedByCurrentThread()) {
            // If we already own the lock, we can just increment the count.
            ASSERT(lock_count > 0);
            lock_count++;
@@ -47,7 +47,7 @@ public:
     }

     void Unlock() {
-        ASSERT(this->IsLockedByCurrentThread());
+        ASSERT(IsLockedByCurrentThread());
         ASSERT(lock_count > 0);

         // Release an instance of the lock.
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index f57e98047..45ad589d9 100755
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -101,11 +101,12 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
         UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
         break;
     }
+    thread_type_for_debugging = type;

     // Set the ideal core ID and affinity mask.
     virtual_ideal_core_id = virt_core;
     physical_ideal_core_id = phys_core;
-    virtual_affinity_mask = (static_cast<u64>(1) << virt_core);
+    virtual_affinity_mask = 1ULL << virt_core;
     physical_affinity_mask.SetAffinity(phys_core, true);

     // Set the thread state.
@@ -353,7 +354,7 @@ void KThread::Unpin() {
     // Enable core migration.
     ASSERT(num_core_migration_disables == 1);
     {
-        --num_core_migration_disables;
+        num_core_migration_disables--;

         // Restore our original state.
         const KAffinityMask old_mask = physical_affinity_mask;
@@ -494,8 +495,8 @@ ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {

     // Update the pinned waiter list.
     {
-        bool retry_update = false;
-        bool thread_is_pinned = false;
+        bool retry_update{};
+        bool thread_is_pinned{};
         do {
             // Lock the scheduler.
             KScopedSchedulerLock sl{kernel};
@@ -507,7 +508,7 @@ ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
             retry_update = false;

             // Check if the thread is currently running.
-            bool thread_is_current = false;
+            bool thread_is_current{};
             s32 thread_core;
             for (thread_core = 0;
                  thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++thread_core) {
@@ -683,8 +684,8 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {

     // If the thread is now paused, update the pinned waiter list.
     if (activity == Svc::ThreadActivity::Paused) {
-        bool thread_is_pinned = false;
-        bool thread_is_current;
+        bool thread_is_pinned{};
+        bool thread_is_current{};
         do {
             // Lock the scheduler.
             KScopedSchedulerLock sl{kernel};
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index eeddf5a65..c8ac656a4 100755
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -534,6 +534,10 @@ public:
         return wait_reason_for_debugging;
     }

+    [[nodiscard]] ThreadType GetThreadTypeForDebugging() const {
+        return thread_type_for_debugging;
+    }
+
     void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
         wait_objects_for_debugging.clear();
         wait_objects_for_debugging.reserve(objects.size());
@@ -721,6 +725,7 @@ private:
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;
     VAddr mutex_wait_address_for_debugging{};
     ThreadWaitReasonForDebugging wait_reason_for_debugging{};
+    ThreadType thread_type_for_debugging{};
     std::string name;

 public:
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 0e6f1df5f..df309d523 100755
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -211,9 +211,8 @@ struct KernelCore::Impl {
     KThread* GetHostDummyThread() {
         const thread_local auto thread = KThread::Create(
-            Core::System::GetInstance(), ThreadType::Main,
-            std::string{"DummyThread:" + GetHostThreadId()}, 0, KThread::DefaultThreadPriority,
-            0, static_cast<u32>(3), 0, nullptr,
+            system, ThreadType::Main, fmt::format("DummyThread:{}", GetHostThreadId()), 0,
+            KThread::DefaultThreadPriority, 0, static_cast<u32>(3), 0, nullptr,
             []([[maybe_unused]] void* arg) { UNREACHABLE(); }, nullptr)
             .Unwrap();
         return thread.get();
     }
@@ -272,7 +271,7 @@ struct KernelCore::Impl {
         constexpr PAddr time_addr{layout.System().StartAddress() + hid_size + font_size + irs_size};

         // Initialize memory manager
-        memory_manager = std::make_unique<Memory::MemoryManager>(system.Kernel());
+        memory_manager = std::make_unique<Memory::MemoryManager>();
         memory_manager->InitializeManager(Memory::MemoryManager::Pool::Application,
                                           layout.Application().StartAddress(),
                                           layout.Application().EndAddress());
diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp
index 75a8a05b5..acf13585c 100755
--- a/src/core/hle/kernel/memory/memory_manager.cpp
+++ b/src/core/hle/kernel/memory/memory_manager.cpp
@@ -41,9 +41,6 @@ std::size_t MemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u6
     return total_metadata_size;
 }

-MemoryManager::MemoryManager(KernelCore& kernel)
-    : pool_lock_0{kernel}, pool_lock_1{kernel}, pool_lock_2{kernel}, pool_lock_3{kernel} {}
-
 void MemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
     ASSERT(pool < Pool::Count);
     managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address);
@@ -58,7 +55,7 @@ VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align

     // Lock the pool that we're allocating from
     const auto pool_index{static_cast<std::size_t>(pool)};
-    KScopedLightLock lk{PoolLock(pool_index)};
+    std::lock_guard lock{pool_locks[pool_index]};

     // Choose a heap based on our page size request
     const s32 heap_index{PageHeap::GetAlignedBlockIndex(num_pages, align_pages)};
@@ -93,7 +90,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa

     // Lock the pool that we're allocating from
     const auto pool_index{static_cast<std::size_t>(pool)};
-    KScopedLightLock lk{PoolLock(pool_index)};
+    std::lock_guard lock{pool_locks[pool_index]};

     // Choose a heap based on our page size request
     const s32 heap_index{PageHeap::GetBlockIndex(num_pages)};
@@ -160,7 +157,7 @@ ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages,

     // Lock the pool that we're freeing from
     const auto pool_index{static_cast<std::size_t>(pool)};
-    KScopedLightLock lk{PoolLock(pool_index)};
+    std::lock_guard lock{pool_locks[pool_index]};

     // TODO (bunnei): Support multiple managers
     Impl& chosen_manager{managers[pool_index]};
diff --git a/src/core/hle/kernel/memory/memory_manager.h b/src/core/hle/kernel/memory/memory_manager.h
index d9f16d15e..3cf444857 100755
--- a/src/core/hle/kernel/memory/memory_manager.h
+++ b/src/core/hle/kernel/memory/memory_manager.h
@@ -7,16 +7,10 @@
 #include <array>
 #include <mutex>

-#include "common/assert.h"
 #include "common/common_types.h"
-#include "core/hle/kernel/k_light_lock.h"
 #include "core/hle/kernel/memory/page_heap.h"
 #include "core/hle/result.h"

-namespace Kernel {
-class KernelCore;
-}
-
 namespace Kernel::Memory {

 class PageLinkedList;
@@ -43,7 +37,7 @@ public:
         Mask = (0xF << Shift),
     };
-    MemoryManager(KernelCore& kernel);
+    MemoryManager() = default;

     constexpr std::size_t GetSize(Pool pool) const {
         return managers[static_cast<std::size_t>(pool)].GetSize();
     }
@@ -95,24 +89,7 @@ private:
     };

 private:
-    KLightLock pool_lock_0;
-    KLightLock pool_lock_1;
-    KLightLock pool_lock_2;
-    KLightLock pool_lock_3;
-
-    KLightLock& PoolLock(std::size_t index) {
-        switch (index) {
-        case 0:
-            return pool_lock_0;
-        case 1:
-            return pool_lock_1;
-        case 2:
-            return pool_lock_2;
-        }
-        ASSERT(index == 3);
-        return pool_lock_3;
-    }
-
+    std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks;
     std::array<Impl, static_cast<std::size_t>(Pool::Count)> managers;
 };

diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index 5aa08c654..080886554 100755
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -57,7 +57,7 @@ constexpr std::size_t GetSizeInRange(const MemoryInfo& info, VAddr start, VAddr

 } // namespace

-PageTable::PageTable(Core::System& system) : general_lock{system.Kernel()}, system{system} {}
+PageTable::PageTable(Core::System& system) : system{system} {}

 ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type,
                                            bool enable_aslr, VAddr code_addr, std::size_t code_size,
@@ -272,7 +272,7 @@ ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_t

 ResultCode PageTable::MapProcessCode(VAddr addr, std::size_t num_pages, MemoryState state,
                                      MemoryPermission perm) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     const u64 size{num_pages * PageSize};

@@ -295,7 +295,7 @@ ResultCode PageTable::MapProcessCode(VAddr addr, std::size_t num_pages, MemorySt
 }

 ResultCode PageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     const std::size_t num_pages{size / PageSize};

@@ -332,7 +332,7 @@ ResultCode PageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::
 }

 ResultCode PageTable::UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     if (!size) {
         return RESULT_SUCCESS;
@@ -394,7 +394,7 @@ void PageTable::MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start,
 }

 ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     std::size_t mapped_size{};
     const VAddr end_addr{addr + size};
@@ -444,7 +444,7 @@ ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
 }

 ResultCode PageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     const VAddr end_addr{addr + size};
     ResultCode result{RESULT_SUCCESS};
@@ -481,7 +481,7 @@ ResultCode PageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
 }

 ResultCode PageTable::UnmapMemory(VAddr addr, std::size_t size) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     const VAddr end_addr{addr + size};
     ResultCode result{RESULT_SUCCESS};
@@ -517,7 +517,7 @@ ResultCode PageTable::UnmapMemory(VAddr addr, std::size_t size) {
 }

 ResultCode PageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     MemoryState src_state{};
     CASCADE_CODE(CheckMemoryState(
@@ -555,7 +555,7 @@ ResultCode PageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
 }

 ResultCode PageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     MemoryState src_state{};
     CASCADE_CODE(CheckMemoryState(
@@ -620,7 +620,7 @@ ResultCode PageTable::MapPages(VAddr addr, const PageLinkedList& page_linked_lis

 ResultCode PageTable::MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state,
                                MemoryPermission perm) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     const std::size_t num_pages{page_linked_list.GetNumPages()};
     const std::size_t size{num_pages * PageSize};
@@ -642,7 +642,7 @@ ResultCode PageTable::MapPages(VAddr addr, PageLinkedList& page_linked_list, Mem

 ResultCode PageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size,
                                               MemoryPermission perm) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     MemoryState prev_state{};
     MemoryPermission prev_perm{};
@@ -688,7 +688,7 @@ ResultCode PageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size, Memo
 }

 MemoryInfo PageTable::QueryInfoImpl(VAddr addr) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     return block_manager->FindBlock(addr).GetMemoryInfo();
 }
@@ -703,7 +703,7 @@ MemoryInfo PageTable::QueryInfo(VAddr addr) {
 }

 ResultCode PageTable::ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     MemoryState state{};
     MemoryAttribute attribute{};
@@ -721,7 +721,7 @@ ResultCode PageTable::ReserveTransferMemory(VAddr addr, std::size_t size, Memory
 }

 ResultCode PageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     MemoryState state{};

@@ -739,7 +739,7 @@ ResultCode PageTable::ResetTransferMemory(VAddr addr, std::size_t size) {

 ResultCode PageTable::SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask,
                                          MemoryAttribute value) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     MemoryState state{};
     MemoryPermission perm{};
@@ -760,7 +760,7 @@ ResultCode PageTable::SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAtt
 }

 ResultCode PageTable::SetHeapCapacity(std::size_t new_heap_capacity) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};
     heap_capacity = new_heap_capacity;
     return RESULT_SUCCESS;
 }
@@ -777,7 +777,7 @@ ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {

     // Increase the heap size
     {
-        KScopedLightLock lk{general_lock};
+        std::lock_guard lock{page_table_lock};

         const u64 delta{size - previous_heap_size};

@@ -813,7 +813,7 @@ ResultVal<VAddr> PageTable::AllocateAndMapMemory(std::size_t needed_num_pages, s
                                                  bool is_map_only, VAddr region_start,
                                                  std::size_t region_num_pages, MemoryState state,
                                                  MemoryPermission perm, PAddr map_addr) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     if (!CanContain(region_start, region_num_pages * PageSize, state)) {
         return ERR_INVALID_ADDRESS_STATE;
@@ -844,7 +844,7 @@ ResultVal<VAddr> PageTable::AllocateAndMapMemory(std::size_t needed_num_pages, s
 }

 ResultCode PageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     MemoryPermission perm{};
     if (const ResultCode result{CheckMemoryState(
@@ -867,7 +867,7 @@ ResultCode PageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
 }

 ResultCode PageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     MemoryPermission perm{};
     if (const ResultCode result{CheckMemoryState(
@@ -937,7 +937,7 @@ VAddr PageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages

 ResultCode PageTable::Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group,
                               OperationType operation) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     ASSERT(Common::IsAligned(addr, PageSize));
     ASSERT(num_pages > 0);
@@ -962,7 +962,7 @@ ResultCode PageTable::Operate(VAddr addr, std::size_t num_pages, const PageLinke

 ResultCode PageTable::Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm,
                               OperationType operation, PAddr map_addr) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     ASSERT(num_pages > 0);
     ASSERT(Common::IsAligned(addr, PageSize));
@@ -1123,7 +1123,7 @@ ResultCode PageTable::CheckMemoryState(MemoryState* out_state, MemoryPermission*
                                        MemoryPermission perm_mask, MemoryPermission perm,
                                        MemoryAttribute attr_mask, MemoryAttribute attr,
                                        MemoryAttribute ignore_attr) {
-    KScopedLightLock lk{general_lock};
+    std::lock_guard lock{page_table_lock};

     // Get information about the first block
     const VAddr last_addr{addr + size - 1};
diff --git a/src/core/hle/kernel/memory/page_table.h b/src/core/hle/kernel/memory/page_table.h
index 2e91837ac..ce0d38849 100755
--- a/src/core/hle/kernel/memory/page_table.h
+++ b/src/core/hle/kernel/memory/page_table.h
@@ -111,7 +111,7 @@ private:
                                 perm, attr_mask, attr, ignore_attr);
     }

-    KLightLock general_lock;
+    std::recursive_mutex page_table_lock;
     std::unique_ptr<MemoryBlockManager> block_manager;

 public:
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 9f4583b49..0edbfc4cc 100755
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -138,7 +138,7 @@ std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {

 void Process::IncrementThreadCount() {
     ASSERT(num_threads >= 0);
-    ++num_created_threads;
+    num_created_threads++;

     if (const auto count = ++num_threads; count > peak_num_threads) {
         peak_num_threads = count;
@@ -443,7 +443,7 @@ bool Process::IsSignaled() const {

 Process::Process(Core::System& system)
     : KSynchronizationObject{system.Kernel()}, page_table{std::make_unique<Memory::PageTable>(system)},
       handle_table{system.Kernel()},
-      address_arbiter{system}, condition_var{system}, system{system} {}
+      address_arbiter{system}, condition_var{system}, state_lock{system.Kernel()}, system{system} {}

 Process::~Process() = default;
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 11d78f3a8..26e647743 100755
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -348,6 +348,10 @@ public:
     void PinCurrentThread();
     void UnpinCurrentThread();

+    KLightLock& GetStateLock() {
+        return state_lock;
+    }
+
     ///////////////////////////////////////////////////////////////////////////////////////////////
     // Thread-local storage management

@@ -472,6 +476,8 @@ private:
     KThread* exception_thread{};

+    KLightLock state_lock;
+
     /// System context
     Core::System& system;
 };

diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index dbef854f8..7fd514e9d 100755
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1450,11 +1450,14 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
              Svc::ResultInvalidPriority);
     R_UNLESS(process.CheckThreadPriority(priority), Svc::ResultInvalidPriority);

-    ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1));
+    ASSERT(process.GetResourceLimit()->Reserve(ResourceType::Threads, 1));

-    CASCADE_RESULT(std::shared_ptr<KThread> thread,
-                   KThread::Create(system, ThreadType::User, "", entry_point, priority, arg,
-                                   core_id, stack_bottom, &process));
+    std::shared_ptr<KThread> thread;
+    {
+        KScopedLightLock lk{process.GetStateLock()};
+        CASCADE_RESULT(thread, KThread::Create(system, ThreadType::User, "", entry_point, priority,
+                                               arg, core_id, stack_bottom, &process));
+    }

     const auto new_thread_handle = process.GetHandleTable().Create(thread);
     if (new_thread_handle.Failed()) {
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index cbec692f9..0e5156dcc 100755
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -93,8 +93,10 @@ std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList()
     std::size_t row = 0;
     auto add_threads = [&](const std::vector<std::shared_ptr<Kernel::KThread>>& threads) {
         for (std::size_t i = 0; i < threads.size(); ++i) {
-            item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i]));
-            item_list.back()->row = row;
+            if (threads[i]->GetThreadTypeForDebugging() == Kernel::ThreadType::User) {
+                item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i]));
+                item_list.back()->row = row;
+            }
             ++row;
         }
     };
@@ -148,6 +150,10 @@ QString WaitTreeCallstack::GetText() const {
 std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() const {
     std::vector<std::unique_ptr<WaitTreeItem>> list;

+    if (thread.GetThreadTypeForDebugging() != Kernel::ThreadType::User) {
+        return list;
+    }
+
     if (thread.GetOwnerProcess() == nullptr || !thread.GetOwnerProcess()->Is64BitProcess()) {
         return list;
     }