From fdaddafa9ebe03f99377514925f81c3036374281 Mon Sep 17 00:00:00 2001 From: pineappleEA Date: Tue, 30 Nov 2021 21:23:11 +0100 Subject: [PATCH] early-access version 2259 --- README.md | 2 +- src/core/CMakeLists.txt | 2 - src/core/core.cpp | 6 + src/core/core.h | 3 + src/core/cpu_manager.cpp | 23 +- src/core/hle/kernel/k_address_arbiter.cpp | 92 ++++--- src/core/hle/kernel/k_auto_object.h | 4 - src/core/hle/kernel/k_condition_variable.cpp | 243 +++++++++-------- src/core/hle/kernel/k_condition_variable.h | 2 +- src/core/hle/kernel/k_handle_table.cpp | 6 - src/core/hle/kernel/k_handle_table.h | 2 - .../hle/kernel/k_light_condition_variable.h | 60 ++++- src/core/hle/kernel/k_light_lock.cpp | 69 +++-- src/core/hle/kernel/k_light_lock.h | 2 +- src/core/hle/kernel/k_process.cpp | 28 +- src/core/hle/kernel/k_process.h | 7 +- src/core/hle/kernel/k_scheduler.cpp | 99 +++---- src/core/hle/kernel/k_scheduler.h | 2 - src/core/hle/kernel/k_scheduler_lock.h | 10 - .../k_scoped_scheduler_lock_and_sleep.h | 1 - src/core/hle/kernel/k_server_session.cpp | 3 +- .../hle/kernel/k_synchronization_object.cpp | 151 ++++++----- .../hle/kernel/k_synchronization_object.h | 32 --- src/core/hle/kernel/k_thread.cpp | 248 +++++++----------- src/core/hle/kernel/k_thread.h | 72 +---- src/core/hle/kernel/k_thread_queue.h | 74 +++++- src/core/hle/kernel/kernel.cpp | 81 +++--- src/core/hle/kernel/kernel.h | 5 - src/core/hle/kernel/service_thread.cpp | 37 ++- src/core/hle/kernel/svc.cpp | 33 +-- src/core/hle/kernel/time_manager.cpp | 6 +- src/core/hle/service/friend/friend.cpp | 23 +- 32 files changed, 665 insertions(+), 763 deletions(-) diff --git a/README.md b/README.md index c3c985c52..aec7e33a4 100755 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ yuzu emulator early access ============= -This is the source code for early-access 2258. +This is the source code for early-access 2259. 
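Relative to 2258, the change set below is largely a revert: it backs out the kernel's KThreadQueue-based wait machinery (waits expressed as BeginWait/EndWait against a queue object) and restores the older protocol in which the sleeping thread sets itself to ThreadState::Waiting and the signaller publishes a result via SetSyncedObject() and then calls Wakeup(). As a rough, self-contained analogy in standard C++ (WaitSlot and its members are illustrative stand-ins, not yuzu types), the restored protocol behaves like a condition-variable wait where the waker stores the result before notifying:

    #include <condition_variable>
    #include <mutex>
    #include <optional>

    struct WaitSlot {
        std::mutex m;
        std::condition_variable cv;
        std::optional<int> result; // stand-in for the thread's synced-object result

        int Wait() {
            std::unique_lock lk(m);
            cv.wait(lk, [&] { return result.has_value(); }); // sleep in "Waiting" state
            return *result;                                  // GetWaitResult()
        }

        void Wakeup(int r) {
            {
                std::scoped_lock lk(m);
                result = r; // SetSyncedObject(nullptr, r) happens before the wake
            }
            cv.notify_one(); // Wakeup()
        }
    };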
## Legal Notice diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index cd99b447b..eee8e2ccd 100755 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -185,7 +185,6 @@ add_library(core STATIC hle/kernel/k_event.h hle/kernel/k_handle_table.cpp hle/kernel/k_handle_table.h - hle/kernel/k_light_condition_variable.cpp hle/kernel/k_light_condition_variable.h hle/kernel/k_light_lock.cpp hle/kernel/k_light_lock.h @@ -238,7 +237,6 @@ add_library(core STATIC hle/kernel/k_system_control.h hle/kernel/k_thread.cpp hle/kernel/k_thread.h - hle/kernel/k_thread_queue.cpp hle/kernel/k_thread_queue.h hle/kernel/k_trace.h hle/kernel/k_transfer_memory.cpp diff --git a/src/core/core.cpp b/src/core/core.cpp index aa96f709b..473ab9f81 100755 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -521,6 +521,12 @@ const ARM_Interface& System::CurrentArmInterface() const { return impl->kernel.CurrentPhysicalCore().ArmInterface(); } +std::size_t System::CurrentCoreIndex() const { + std::size_t core = impl->kernel.GetCurrentHostThreadID(); + ASSERT(core < Core::Hardware::NUM_CPU_CORES); + return core; +} + Kernel::PhysicalCore& System::CurrentPhysicalCore() { return impl->kernel.CurrentPhysicalCore(); } diff --git a/src/core/core.h b/src/core/core.h index 52ff90359..645e5c241 100755 --- a/src/core/core.h +++ b/src/core/core.h @@ -208,6 +208,9 @@ public: /// Gets an ARM interface to the CPU core that is currently running [[nodiscard]] const ARM_Interface& CurrentArmInterface() const; + /// Gets the index of the currently running CPU core + [[nodiscard]] std::size_t CurrentCoreIndex() const; + /// Gets the physical core for the CPU core that is currently running [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore(); diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp index cbcc54891..5d43c6e5d 100755 --- a/src/core/cpu_manager.cpp +++ b/src/core/cpu_manager.cpp @@ -117,18 +117,17 @@ void CpuManager::MultiCoreRunGuestLoop() { physical_core = &kernel.CurrentPhysicalCore(); } system.ExitDynarmicProfile(); - { - Kernel::KScopedDisableDispatch dd(kernel); - physical_core->ArmInterface().ClearExclusiveState(); - } + physical_core->ArmInterface().ClearExclusiveState(); + kernel.CurrentScheduler()->RescheduleCurrentCore(); } } void CpuManager::MultiCoreRunIdleThread() { auto& kernel = system.Kernel(); while (true) { - Kernel::KScopedDisableDispatch dd(kernel); - kernel.CurrentPhysicalCore().Idle(); + auto& physical_core = kernel.CurrentPhysicalCore(); + physical_core.Idle(); + kernel.CurrentScheduler()->RescheduleCurrentCore(); } } @@ -136,12 +135,12 @@ void CpuManager::MultiCoreRunSuspendThread() { auto& kernel = system.Kernel(); kernel.CurrentScheduler()->OnThreadStart(); while (true) { - auto core = kernel.CurrentPhysicalCoreIndex(); + auto core = kernel.GetCurrentHostThreadID(); auto& scheduler = *kernel.CurrentScheduler(); Kernel::KThread* current_thread = scheduler.GetCurrentThread(); Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context); ASSERT(scheduler.ContextSwitchPending()); - ASSERT(core == kernel.CurrentPhysicalCoreIndex()); + ASSERT(core == kernel.GetCurrentHostThreadID()); scheduler.RescheduleCurrentCore(); } } @@ -347,11 +346,15 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) { sc_sync_first_use = false; } - // Emulation was stopped - if (stop_token.stop_requested()) { + // Abort if emulation was killed before the session really starts + if (!system.IsPoweredOn()) { return; } + if (stop_token.stop_requested()) { 
+ break; + } + auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread(); data.is_running = true; Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext()); diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp index a4ce99402..1b429bc1e 100755 --- a/src/core/hle/kernel/k_address_arbiter.cpp +++ b/src/core/hle/kernel/k_address_arbiter.cpp @@ -8,7 +8,6 @@ #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/k_thread.h" -#include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/time_manager.h" @@ -29,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) { bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) { auto& monitor = system.Monitor(); - const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); + const auto current_core = system.CurrentCoreIndex(); // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. // TODO(bunnei): We should call CanAccessAtomic(..) here. @@ -59,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) { auto& monitor = system.Monitor(); - const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); + const auto current_core = system.CurrentCoreIndex(); // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. // TODO(bunnei): We should call CanAccessAtomic(..) here. @@ -86,27 +85,6 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 return true; } -class ThreadQueueImplForKAddressArbiter final : public KThreadQueue { -public: - explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t) - : KThreadQueue(kernel_), m_tree(t) {} - - virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result, - bool cancel_timer_task) override { - // If the thread is waiting on an address arbiter, remove it from the tree. - if (waiting_thread->IsWaitingForAddressArbiter()) { - m_tree->erase(m_tree->iterator_to(*waiting_thread)); - waiting_thread->ClearAddressArbiter(); - } - - // Invoke the base cancel wait handler. - KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task); - } - -private: - KAddressArbiter::ThreadTree* m_tree; -}; - } // namespace ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) { @@ -118,14 +96,14 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) { auto it = thread_tree.nfind_light({addr, -1}); while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { - // End the thread's wait. 
KThread* target_thread = std::addressof(*it); - target_thread->EndWait(ResultSuccess); + target_thread->SetSyncedObject(nullptr, ResultSuccess); ASSERT(target_thread->IsWaitingForAddressArbiter()); - target_thread->ClearAddressArbiter(); + target_thread->Wakeup(); it = thread_tree.erase(it); + target_thread->ClearAddressArbiter(); ++num_waiters; } } @@ -151,14 +129,14 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 auto it = thread_tree.nfind_light({addr, -1}); while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { - // End the thread's wait. KThread* target_thread = std::addressof(*it); - target_thread->EndWait(ResultSuccess); + target_thread->SetSyncedObject(nullptr, ResultSuccess); ASSERT(target_thread->IsWaitingForAddressArbiter()); - target_thread->ClearAddressArbiter(); + target_thread->Wakeup(); it = thread_tree.erase(it); + target_thread->ClearAddressArbiter(); ++num_waiters; } } @@ -219,14 +197,14 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { - // End the thread's wait. KThread* target_thread = std::addressof(*it); - target_thread->EndWait(ResultSuccess); + target_thread->SetSyncedObject(nullptr, ResultSuccess); ASSERT(target_thread->IsWaitingForAddressArbiter()); - target_thread->ClearAddressArbiter(); + target_thread->Wakeup(); it = thread_tree.erase(it); + target_thread->ClearAddressArbiter(); ++num_waiters; } } @@ -236,7 +214,6 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) { // Prepare to wait. KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); - ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree)); { KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; @@ -247,6 +224,9 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement return ResultTerminationRequested; } + // Set the synced object. + cur_thread->SetSyncedObject(nullptr, ResultTimedOut); + // Read the value from userspace. s32 user_value{}; bool succeeded{}; @@ -276,20 +256,31 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement // Set the arbiter. cur_thread->SetAddressArbiter(&thread_tree, addr); thread_tree.insert(*cur_thread); - - // Wait for the thread to finish. - cur_thread->BeginWait(std::addressof(wait_queue)); + cur_thread->SetState(ThreadState::Waiting); cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration); } + // Cancel the timer wait. + kernel.TimeManager().UnscheduleTimeEvent(cur_thread); + + // Remove from the address arbiter. + { + KScopedSchedulerLock sl(kernel); + + if (cur_thread->IsWaitingForAddressArbiter()) { + thread_tree.erase(thread_tree.iterator_to(*cur_thread)); + cur_thread->ClearAddressArbiter(); + } + } + // Get the result. - return cur_thread->GetWaitResult(); + KSynchronizationObject* dummy{}; + return cur_thread->GetWaitResult(&dummy); } ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) { // Prepare to wait. 
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); - ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree)); { KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; @@ -300,6 +291,9 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) { return ResultTerminationRequested; } + // Set the synced object. + cur_thread->SetSyncedObject(nullptr, ResultTimedOut); + // Read the value from userspace. s32 user_value{}; if (!ReadFromUser(system, &user_value, addr)) { @@ -322,14 +316,26 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) { // Set the arbiter. cur_thread->SetAddressArbiter(&thread_tree, addr); thread_tree.insert(*cur_thread); - - // Wait for the thread to finish. - cur_thread->BeginWait(std::addressof(wait_queue)); + cur_thread->SetState(ThreadState::Waiting); cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration); } + // Cancel the timer wait. + kernel.TimeManager().UnscheduleTimeEvent(cur_thread); + + // Remove from the address arbiter. + { + KScopedSchedulerLock sl(kernel); + + if (cur_thread->IsWaitingForAddressArbiter()) { + thread_tree.erase(thread_tree.iterator_to(*cur_thread)); + cur_thread->ClearAddressArbiter(); + } + } + // Get the result. - return cur_thread->GetWaitResult(); + KSynchronizationObject* dummy{}; + return cur_thread->GetWaitResult(&dummy); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h index 165b76747..e4fcdbc67 100755 --- a/src/core/hle/kernel/k_auto_object.h +++ b/src/core/hle/kernel/k_auto_object.h @@ -170,10 +170,6 @@ public: } } - const std::string& GetName() const { - return name; - } - private: void RegisterWithKernel(); void UnregisterWithKernel(); diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp index f343e3c2f..7fa9b8cc3 100755 --- a/src/core/hle/kernel/k_condition_variable.cpp +++ b/src/core/hle/kernel/k_condition_variable.cpp @@ -11,7 +11,6 @@ #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_thread.h" -#include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/svc_common.h" #include "core/hle/kernel/svc_results.h" @@ -34,7 +33,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) { bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero, u32 new_orr_mask) { auto& monitor = system.Monitor(); - const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); + const auto current_core = system.CurrentCoreIndex(); // Load the value from the address. const auto expected = monitor.ExclusiveRead32(current_core, address); @@ -58,48 +57,6 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero return true; } -class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue { -public: - explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_) - : KThreadQueue(kernel_) {} - - virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result, - bool cancel_timer_task) override { - // Remove the thread as a waiter from its owner. - waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread); - - // Invoke the base cancel wait handler. 
- KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task); - } -}; - -class ThreadQueueImplForKConditionVariableWaitConditionVariable final : public KThreadQueue { -private: - KConditionVariable::ThreadTree* m_tree; - -public: - explicit ThreadQueueImplForKConditionVariableWaitConditionVariable( - KernelCore& kernel_, KConditionVariable::ThreadTree* t) - : KThreadQueue(kernel_), m_tree(t) {} - - virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result, - bool cancel_timer_task) override { - // Remove the thread as a waiter from its owner. - if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) { - owner->RemoveWaiter(waiting_thread); - } - - // If the thread is waiting on a condvar, remove it from the tree. - if (waiting_thread->IsWaitingForConditionVariable()) { - m_tree->erase(m_tree->iterator_to(*waiting_thread)); - waiting_thread->ClearConditionVariable(); - } - - // Invoke the base cancel wait handler. - KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task); - } -}; - } // namespace KConditionVariable::KConditionVariable(Core::System& system_) @@ -121,77 +78,84 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) { // Determine the next tag. u32 next_value{}; - if (next_owner_thread != nullptr) { + if (next_owner_thread) { next_value = next_owner_thread->GetAddressKeyValue(); if (num_waiters > 1) { next_value |= Svc::HandleWaitMask; } - // Write the value to userspace. - ResultCode result{ResultSuccess}; - if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] { - result = ResultSuccess; - } else { - result = ResultInvalidCurrentMemory; + next_owner_thread->SetSyncedObject(nullptr, ResultSuccess); + next_owner_thread->Wakeup(); + } + + // Write the value to userspace. + if (!WriteToUser(system, addr, std::addressof(next_value))) { + if (next_owner_thread) { + next_owner_thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory); } - // Signal the next owner thread. - next_owner_thread->EndWait(result); - return result; - } else { - // Just write the value to userspace. - R_UNLESS(WriteToUser(system, addr, std::addressof(next_value)), - ResultInvalidCurrentMemory); - - return ResultSuccess; + return ResultInvalidCurrentMemory; } } + + return ResultSuccess; } ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) { KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); - ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel); // Wait for the address. - KThread* owner_thread{}; { - KScopedSchedulerLock sl(kernel); + KScopedAutoObject<KThread> owner_thread; + ASSERT(owner_thread.IsNull()); + { + KScopedSchedulerLock sl(kernel); + cur_thread->SetSyncedObject(nullptr, ResultSuccess); - // Check if the thread should terminate. - R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested); + // Check if the thread should terminate. + R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested); - // Read the tag from userspace. - u32 test_tag{}; - R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory); + { + // Read the tag from userspace. + u32 test_tag{}; + R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), + ResultInvalidCurrentMemory); - // If the tag isn't the handle (with wait mask), we're done.
+ R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), ResultSuccess); - // Get the lock owner thread. - owner_thread = kernel.CurrentProcess() - ->GetHandleTable() - .GetObjectWithoutPseudoHandle<KThread>(handle) - .ReleasePointerUnsafe(); - R_UNLESS(owner_thread != nullptr, ResultInvalidHandle); + // Get the lock owner thread. + owner_thread = + kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>( + handle); + R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle); - // Update the lock. - cur_thread->SetAddressKey(addr, value); - owner_thread->AddWaiter(cur_thread); - - // Begin waiting. - cur_thread->BeginWait(std::addressof(wait_queue)); - cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); - cur_thread->SetMutexWaitAddressForDebugging(addr); + // Update the lock. + cur_thread->SetAddressKey(addr, value); + owner_thread->AddWaiter(cur_thread); + cur_thread->SetState(ThreadState::Waiting); + cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); + cur_thread->SetMutexWaitAddressForDebugging(addr); + } + } + ASSERT(owner_thread.IsNotNull()); } - // Close our reference to the owner thread, now that the wait is over. - owner_thread->Close(); + // Remove the thread as a waiter from the lock owner. + { + KScopedSchedulerLock sl(kernel); + KThread* owner_thread = cur_thread->GetLockOwner(); + if (owner_thread != nullptr) { + owner_thread->RemoveWaiter(cur_thread); + } + } // Get the wait result. - return cur_thread->GetWaitResult(); + KSynchronizationObject* dummy{}; + return cur_thread->GetWaitResult(std::addressof(dummy)); } -void KConditionVariable::SignalImpl(KThread* thread) { +KThread* KConditionVariable::SignalImpl(KThread* thread) { // Check pre-conditions. ASSERT(kernel.GlobalSchedulerContext().IsLocked()); @@ -205,16 +169,18 @@ // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. // TODO(bunnei): We should call CanAccessAtomic(..) here. can_access = true; - if (can_access) [[likely]] { + if (can_access) { UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag, Svc::HandleWaitMask); } } - if (can_access) [[likely]] { + KThread* thread_to_close = nullptr; + if (can_access) { if (prev_tag == Svc::InvalidHandle) { // If nobody held the lock previously, we're all good. - thread->EndWait(ResultSuccess); + thread->SetSyncedObject(nullptr, ResultSuccess); + thread->Wakeup(); } else { // Get the previous owner. KThread* owner_thread = kernel.CurrentProcess() ->GetHandleTable() .GetObjectWithoutPseudoHandle<KThread>( static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask)) .ReleasePointerUnsafe(); - if (owner_thread) [[likely]] { + if (owner_thread) { // Add the thread as a waiter on the owner. owner_thread->AddWaiter(thread); - owner_thread->Close(); + thread_to_close = owner_thread; } else { // The lock was tagged with a thread that doesn't exist. - thread->EndWait(ResultInvalidState); + thread->SetSyncedObject(nullptr, ResultInvalidState); + thread->Wakeup(); } } } else { // If the address wasn't accessible, note so. - thread->EndWait(ResultInvalidCurrentMemory); + thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory); + thread->Wakeup(); } + + return thread_to_close; } void KConditionVariable::Signal(u64 cv_key, s32 count) { + // Prepare for signaling. + constexpr int MaxThreads = 16; + + KLinkedList<KThread> thread_list{kernel}; + std::array<KThread*, MaxThreads> thread_array; + s32 num_to_close{}; + // Perform signaling.
- int num_waiters = 0; + s32 num_waiters{}; { KScopedSchedulerLock sl(kernel); @@ -249,7 +226,14 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { (it->GetConditionVariableKey() == cv_key)) { KThread* target_thread = std::addressof(*it); - this->SignalImpl(target_thread); + if (KThread* thread = SignalImpl(target_thread); thread != nullptr) { + if (num_to_close < MaxThreads) { + thread_array[num_to_close++] = thread; + } else { + thread_list.push_back(*thread); + } + } + it = thread_tree.erase(it); target_thread->ClearConditionVariable(); ++num_waiters; @@ -257,20 +241,31 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { // If we have no waiters, clear the has waiter flag. if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) { - const u32 has_waiter_flag = 0; + const u32 has_waiter_flag{}; WriteToUser(system, cv_key, std::addressof(has_waiter_flag)); } } + + // Close threads in the array. + for (auto i = 0; i < num_to_close; ++i) { + thread_array[i]->Close(); + } + + // Close threads in the list. + for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) { + (*it).Close(); + } } ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) { // Prepare to wait. - KThread* cur_thread = GetCurrentThreadPointer(kernel); - ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue( - kernel, std::addressof(thread_tree)); + KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); { - KScopedSchedulerLockAndSleep slp(kernel, cur_thread, timeout); + KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; + + // Set the synced object. + cur_thread->SetSyncedObject(nullptr, ResultTimedOut); // Check that the thread isn't terminating. if (cur_thread->IsTerminationRequested()) { @@ -295,7 +290,8 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) } // Wake up the next owner. - next_owner_thread->EndWait(ResultSuccess); + next_owner_thread->SetSyncedObject(nullptr, ResultSuccess); + next_owner_thread->Wakeup(); } // Write to the cv key. @@ -312,21 +308,40 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) } } - // If timeout is zero, time out. - R_UNLESS(timeout != 0, ResultTimedOut); - // Update condition variable tracking. - cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value); - thread_tree.insert(*cur_thread); + { + cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value); + thread_tree.insert(*cur_thread); + } - // Begin waiting. - cur_thread->BeginWait(std::addressof(wait_queue)); - cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); - cur_thread->SetMutexWaitAddressForDebugging(addr); + // If the timeout is non-zero, set the thread as waiting. + if (timeout != 0) { + cur_thread->SetState(ThreadState::Waiting); + cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); + cur_thread->SetMutexWaitAddressForDebugging(addr); + } } - // Get the wait result. - return cur_thread->GetWaitResult(); + // Cancel the timer wait. + kernel.TimeManager().UnscheduleTimeEvent(cur_thread); + + // Remove from the condition variable. 
+ { + KScopedSchedulerLock sl(kernel); + + if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) { + owner->RemoveWaiter(cur_thread); + } + + if (cur_thread->IsWaitingForConditionVariable()) { + thread_tree.erase(thread_tree.iterator_to(*cur_thread)); + cur_thread->ClearConditionVariable(); + } + } + + // Get the result. + KSynchronizationObject* dummy{}; + return cur_thread->GetWaitResult(std::addressof(dummy)); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h index 5e4815d08..861dbd420 100755 --- a/src/core/hle/kernel/k_condition_variable.h +++ b/src/core/hle/kernel/k_condition_variable.h @@ -34,7 +34,7 @@ public: [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout); private: - void SignalImpl(KThread* thread); + [[nodiscard]] KThread* SignalImpl(KThread* thread); ThreadTree thread_tree; diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp index cf95f0852..e90fc0628 100755 --- a/src/core/hle/kernel/k_handle_table.cpp +++ b/src/core/hle/kernel/k_handle_table.cpp @@ -13,7 +13,6 @@ ResultCode KHandleTable::Finalize() { // Get the table and clear our record of it. u16 saved_table_size = 0; { - KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); std::swap(m_table_size, saved_table_size); @@ -44,7 +43,6 @@ bool KHandleTable::Remove(Handle handle) { // Find the object and free the entry. KAutoObject* obj = nullptr; { - KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); if (this->IsValidHandle(handle)) { @@ -64,7 +62,6 @@ } ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) { - KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); // Never exceed our capacity. @@ -87,7 +84,6 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) { } ResultCode KHandleTable::Reserve(Handle* out_handle) { - KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); // Never exceed our capacity. @@ -98,7 +94,6 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) { } void KHandleTable::Unreserve(Handle handle) { - KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); // Unpack the handle. @@ -117,7 +112,6 @@ void KHandleTable::Unreserve(Handle handle) { } void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) { - KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); // Unpack the handle. diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h index 4b114ec2f..95ec905ae 100755 --- a/src/core/hle/kernel/k_handle_table.h +++ b/src/core/hle/kernel/k_handle_table.h @@ -68,7 +68,6 @@ public: template <typename T> KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const { // Lock and look up in table. - KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); if constexpr (std::is_same_v<T, KThread>) { @@ -123,7 +122,6 @@ public: size_t num_opened; { // Lock the table. - KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); for (num_opened = 0; num_opened < num_handles; num_opened++) { // Get the current handle.
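Throughout the k_handle_table hunks above, the KScopedDisableDispatch guard is removed and the table's spin lock becomes the sole protection around lookups such as GetObjectWithoutPseudoHandle, which takes a reference before releasing the lock. A minimal, self-contained sketch of that lookup-under-spinlock pattern, with placeholder types rather than the real yuzu classes:

    #include <array>
    #include <atomic>
    #include <cstddef>

    struct SpinLock {
        std::atomic_flag flag = ATOMIC_FLAG_INIT;
        void lock() {
            while (flag.test_and_set(std::memory_order_acquire)) {} // spin until acquired
        }
        void unlock() { flag.clear(std::memory_order_release); }
    };

    struct Object {
        std::atomic<int> ref_count{1};
        void Open() { ++ref_count; } // take a reference
    };

    struct Table {
        SpinLock lock;
        std::array<Object*, 1024> slots{};

        // Look up a slot and take a reference while the lock is still held,
        // so the object cannot be destroyed between lookup and use.
        Object* GetObject(std::size_t index) {
            lock.lock();
            Object* obj = (index < slots.size()) ? slots[index] : nullptr;
            if (obj != nullptr) {
                obj->Open();
            }
            lock.unlock();
            return obj;
        }
    };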
diff --git a/src/core/hle/kernel/k_light_condition_variable.h b/src/core/hle/kernel/k_light_condition_variable.h index 65d3bc3e1..fb0ad783a 100755 --- a/src/core/hle/kernel/k_light_condition_variable.h +++ b/src/core/hle/kernel/k_light_condition_variable.h @@ -1,25 +1,73 @@ -// Copyright 2021 yuzu Emulator Project +// Copyright 2020 yuzu Emulator Project // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +// This file references various implementation details from Atmosphere, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. + #pragma once #include "common/common_types.h" -#include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" +#include "core/hle/kernel/time_manager.h" namespace Kernel { - class KernelCore; -class KLightLock; class KLightConditionVariable { public: explicit KLightConditionVariable(KernelCore& kernel_) : kernel{kernel_} {} - void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true); - void Broadcast(); + void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true) { + WaitImpl(lock, timeout, allow_terminating_thread); + } + + void Broadcast() { + KScopedSchedulerLock lk{kernel}; + + // Signal all threads. + for (auto& thread : wait_list) { + thread.SetState(ThreadState::Runnable); + } + } private: + void WaitImpl(KLightLock* lock, s64 timeout, bool allow_terminating_thread) { + KThread* owner = GetCurrentThreadPointer(kernel); + + // Sleep the thread. + { + KScopedSchedulerLockAndSleep lk{kernel, owner, timeout}; + + if (!allow_terminating_thread && owner->IsTerminationRequested()) { + lk.CancelSleep(); + return; + } + + lock->Unlock(); + + // Set the thread as waiting. + GetCurrentThread(kernel).SetState(ThreadState::Waiting); + + // Add the thread to the queue. + wait_list.push_back(GetCurrentThread(kernel)); + } + + // Remove the thread from the wait list. + { + KScopedSchedulerLock sl{kernel}; + + wait_list.erase(wait_list.iterator_to(GetCurrentThread(kernel))); + } + + // Cancel the task that the sleep set up. + kernel.TimeManager().UnscheduleTimeEvent(owner); + + // Re-acquire the lock. + lock->Lock(); + } + KernelCore& kernel; KThread::WaiterList wait_list{}; }; diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp index 5e8f1a510..0896e705f 100755 --- a/src/core/hle/kernel/k_light_lock.cpp +++ b/src/core/hle/kernel/k_light_lock.cpp @@ -5,54 +5,44 @@ #include "core/hle/kernel/k_light_lock.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_thread.h" -#include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/kernel.h" namespace Kernel { -namespace { - -class ThreadQueueImplForKLightLock final : public KThreadQueue { -public: - explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {} - - virtual void CancelWait([[maybe_unused]] KThread* waiting_thread, - [[maybe_unused]] ResultCode wait_result, - [[maybe_unused]] bool cancel_timer_task) override { - // Do nothing, waiting to acquire a light lock cannot be canceled. - } -}; - -} // namespace - void KLightLock::Lock() { const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)); + const uintptr_t cur_thread_tag = (cur_thread | 1); while (true) { uintptr_t old_tag = tag.load(std::memory_order_relaxed); - while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ?
cur_thread : (old_tag | 1), + while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1, std::memory_order_acquire)) { + if ((old_tag | 1) == cur_thread_tag) { + return; + } } - if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) { + if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) { break; } + + LockSlowPath(old_tag | 1, cur_thread); } } void KLightLock::Unlock() { const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)); - uintptr_t expected = cur_thread; - if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) { - this->UnlockSlowPath(cur_thread); - } + do { + if (expected != cur_thread) { + return UnlockSlowPath(cur_thread); + } + } while (!tag.compare_exchange_weak(expected, 0, std::memory_order_release)); } -bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { +void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread); - ThreadQueueImplForKLightLock wait_queue(kernel); // Pend the current thread waiting on the owner thread. { @@ -60,23 +50,30 @@ bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { // Ensure we actually have locking to do. if (tag.load(std::memory_order_relaxed) != _owner) { - return false; + return; } // Add the current thread as a waiter on the owner. - KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ul); + KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL); cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag))); owner_thread->AddWaiter(cur_thread); - // Begin waiting to hold the lock. - cur_thread->BeginWait(std::addressof(wait_queue)); + // Set thread states. + cur_thread->SetState(ThreadState::Waiting); if (owner_thread->IsSuspended()) { owner_thread->ContinueIfHasKernelWaiters(); } } - return true; + // We're no longer waiting on the lock owner. + { + KScopedSchedulerLock sl{kernel}; + + if (KThread* owner_thread = cur_thread->GetLockOwner(); owner_thread != nullptr) { + owner_thread->RemoveWaiter(cur_thread); + } + } } void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) { @@ -84,20 +81,22 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) { // Unlock. { - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl{kernel}; // Get the next owner. - s32 num_waiters; + s32 num_waiters = 0; KThread* next_owner = owner_thread->RemoveWaiterByKey( std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag))); // Pass the lock to the next owner. uintptr_t next_tag = 0; if (next_owner != nullptr) { - next_tag = - reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(num_waiters > 1); + next_tag = reinterpret_cast<uintptr_t>(next_owner); + if (num_waiters > 1) { + next_tag |= 0x1; + } - next_owner->EndWait(ResultSuccess); + next_owner->SetState(ThreadState::Runnable); if (next_owner->IsSuspended()) { next_owner->ContinueIfHasKernelWaiters(); @@ -111,7 +110,7 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) { } // Write the new tag value.
- tag.store(next_tag, std::memory_order_release); + tag.store(next_tag); } } diff --git a/src/core/hle/kernel/k_light_lock.h b/src/core/hle/kernel/k_light_lock.h index 4163b8a85..ad853661d 100755 --- a/src/core/hle/kernel/k_light_lock.h +++ b/src/core/hle/kernel/k_light_lock.h @@ -20,7 +20,7 @@ public: void Unlock(); - bool LockSlowPath(uintptr_t owner, uintptr_t cur_thread); + void LockSlowPath(uintptr_t owner, uintptr_t cur_thread); void UnlockSlowPath(uintptr_t cur_thread); diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 90dda40dc..1aad061e1 100755 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -60,7 +60,6 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority thread->GetContext64().cpu_registers[0] = 0; thread->GetContext32().cpu_registers[1] = thread_handle; thread->GetContext64().cpu_registers[1] = thread_handle; - thread->DisableDispatch(); auto& kernel = system.Kernel(); // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires @@ -228,15 +227,12 @@ void KProcess::PinCurrentThread() { const s32 core_id = GetCurrentCoreId(kernel); KThread* cur_thread = GetCurrentThreadPointer(kernel); - // If the thread isn't terminated, pin it. - if (!cur_thread->IsTerminationRequested()) { - // Pin it. - PinThread(core_id, cur_thread); - cur_thread->Pin(); + // Pin it. + PinThread(core_id, cur_thread); + cur_thread->Pin(); - // An update is needed. - KScheduler::SetSchedulerUpdateNeeded(kernel); - } + // An update is needed. + KScheduler::SetSchedulerUpdateNeeded(kernel); } void KProcess::UnpinCurrentThread() { @@ -254,20 +250,6 @@ void KProcess::UnpinCurrentThread() { KScheduler::SetSchedulerUpdateNeeded(kernel); } -void KProcess::UnpinThread(KThread* thread) { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); - - // Get the thread's core id. - const auto core_id = thread->GetActiveCore(); - - // Unpin it. - UnpinThread(core_id, thread); - thread->Unpin(); - - // An update is needed. - KScheduler::SetSchedulerUpdateNeeded(kernel); -} - ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, [[maybe_unused]] size_t size) { // Lock ourselves, to prevent concurrent access. 
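The k_process.cpp hunks above make PinCurrentThread unconditional again (the termination-request check belonged to the reverted rework) and drop the public UnpinThread helper. Pinning itself is plain bookkeeping: one slot per core, asserted empty on pin and matching on unpin, as the k_process.h assertions below make explicit. A minimal sketch of that invariant, where Thread and NUM_CPU_CORES are placeholders rather than the real yuzu definitions:

    #include <array>
    #include <cassert>
    #include <cstddef>

    constexpr std::size_t NUM_CPU_CORES = 4; // placeholder core count
    struct Thread {};

    struct Process {
        std::array<Thread*, NUM_CPU_CORES> pinned_threads{};

        void PinThread(std::size_t core, Thread* t) {
            assert(core < NUM_CPU_CORES && t != nullptr);
            assert(pinned_threads[core] == nullptr); // at most one pinned thread per core
            pinned_threads[core] = t;
        }

        void UnpinThread(std::size_t core, Thread* t) {
            assert(core < NUM_CPU_CORES && t != nullptr);
            assert(pinned_threads[core] == t); // must unpin the thread that was pinned
            pinned_threads[core] = nullptr;
        }
    };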
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index d972c9de0..8a8c1fcbb 100755 --- a/src/core/hle/kernel/k_process.h +++ b/src/core/hle/kernel/k_process.h @@ -259,7 +259,7 @@ public: [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const { ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); - return pinned_threads.at(core_id); + return pinned_threads[core_id]; } /// Gets 8 bytes of random data for svcGetInfo RandomEntropy @@ -347,7 +347,6 @@ public: void PinCurrentThread(); void UnpinCurrentThread(); - void UnpinThread(KThread* thread); KLightLock& GetStateLock() { return state_lock; @@ -369,14 +368,14 @@ private: void PinThread(s32 core_id, KThread* thread) { ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); ASSERT(thread != nullptr); - ASSERT(pinned_threads.at(core_id) == nullptr); + ASSERT(pinned_threads[core_id] == nullptr); pinned_threads[core_id] = thread; } void UnpinThread(s32 core_id, KThread* thread) { ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); ASSERT(thread != nullptr); - ASSERT(pinned_threads.at(core_id) == thread); + ASSERT(pinned_threads[core_id] == thread); pinned_threads[core_id] = nullptr; } diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index d3b1b2419..6a7d80d03 100755 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -240,8 +240,8 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s3 // If the thread is runnable, we want to change its priority in the queue. if (thread->GetRawState() == ThreadState::Runnable) { - GetPriorityQueue(kernel).ChangePriority(old_priority, - thread == kernel.GetCurrentEmuThread(), thread); + GetPriorityQueue(kernel).ChangePriority( + old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread); IncrementScheduledCount(thread); SetSchedulerUpdateNeeded(kernel); } @@ -360,7 +360,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { } bool KScheduler::CanSchedule(KernelCore& kernel) { - return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() <= 1; + return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1; } bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) { @@ -376,28 +376,20 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) { } void KScheduler::DisableScheduling(KernelCore& kernel) { - // If we are shutting down the kernel, none of this is relevant anymore. - if (kernel.IsShuttingDown()) { - return; + if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { + ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0); + scheduler->GetCurrentThread()->DisableDispatch(); } - - ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0); - GetCurrentThreadPointer(kernel)->DisableDispatch(); } void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { - // If we are shutting down the kernel, none of this is relevant anymore.
- if (kernel.IsShuttingDown()) { - return; - } - - ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1); - - if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) { - GetCurrentThreadPointer(kernel)->EnableDispatch(); - } else { - RescheduleCores(kernel, cores_needing_scheduling); + if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { + ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1); + if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) { + scheduler->GetCurrentThread()->EnableDispatch(); + } } + RescheduleCores(kernel, cores_needing_scheduling); } u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { @@ -625,17 +617,13 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c state.highest_priority_thread = nullptr; } -void KScheduler::Finalize() { +KScheduler::~KScheduler() { if (idle_thread) { idle_thread->Close(); idle_thread = nullptr; } } -KScheduler::~KScheduler() { - ASSERT(!idle_thread); -} - KThread* KScheduler::GetCurrentThread() const { if (auto result = current_thread.load(); result) { return result; @@ -654,12 +642,10 @@ void KScheduler::RescheduleCurrentCore() { if (phys_core.IsInterrupted()) { phys_core.ClearInterrupt(); } - guard.Lock(); if (state.needs_scheduling.load()) { Schedule(); } else { - GetCurrentThread()->EnableDispatch(); guard.Unlock(); } } @@ -669,33 +655,26 @@ void KScheduler::OnThreadStart() { } void KScheduler::Unload(KThread* thread) { - ASSERT(thread); - LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr"); - if (thread->IsCallingSvc()) { - thread->ClearIsCallingSvc(); + if (thread) { + if (thread->IsCallingSvc()) { + thread->ClearIsCallingSvc(); + } + if (!thread->IsTerminationRequested()) { + prev_thread = thread; + + Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); + cpu_core.SaveContext(thread->GetContext32()); + cpu_core.SaveContext(thread->GetContext64()); + // Save the TPIDR_EL0 system register in case it was modified. + thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); + cpu_core.ClearExclusiveState(); + } else { + prev_thread = nullptr; + } + thread->context_guard.Unlock(); } - - auto& physical_core = system.Kernel().PhysicalCore(core_id); - if (!physical_core.IsInitialized()) { - return; - } - - Core::ARM_Interface& cpu_core = physical_core.ArmInterface(); - cpu_core.SaveContext(thread->GetContext32()); - cpu_core.SaveContext(thread->GetContext64()); - // Save the TPIDR_EL0 system register in case it was modified. 
- thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); - cpu_core.ClearExclusiveState(); - - if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) { - prev_thread = thread; - } else { - prev_thread = nullptr; - } - - thread->context_guard.Unlock(); } void KScheduler::Reload(KThread* thread) { @@ -704,6 +683,11 @@ if (thread) { ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable."); + auto* const thread_owner_process = thread->GetOwnerProcess(); + if (thread_owner_process != nullptr) { + system.Kernel().MakeCurrentProcess(thread_owner_process); + } + Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); cpu_core.LoadContext(thread->GetContext32()); cpu_core.LoadContext(thread->GetContext64()); @@ -721,7 +705,7 @@ void KScheduler::SwitchContextStep2() { } void KScheduler::ScheduleImpl() { - KThread* previous_thread = GetCurrentThread(); + KThread* previous_thread = current_thread.load(); KThread* next_thread = state.highest_priority_thread; state.needs_scheduling = false; @@ -733,15 +717,10 @@ void KScheduler::ScheduleImpl() { // If we're not actually switching thread, there's nothing to do. if (next_thread == current_thread.load()) { - previous_thread->EnableDispatch(); guard.Unlock(); return; } - if (next_thread->GetCurrentCore() != core_id) { - next_thread->SetCurrentCore(core_id); - } - current_thread.store(next_thread); KProcess* const previous_process = system.Kernel().CurrentProcess(); @@ -752,7 +731,11 @@ void KScheduler::ScheduleImpl() { Unload(previous_thread); std::shared_ptr<Common::Fiber>* old_context; - old_context = &previous_thread->GetHostContext(); + if (previous_thread != nullptr) { + old_context = &previous_thread->GetHostContext(); + } else { + old_context = &idle_thread->GetHostContext(); + } guard.Unlock(); Common::Fiber::YieldTo(*old_context, *switch_fiber); diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 82fcd99e7..7df288438 100755 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -33,8 +33,6 @@ public: explicit KScheduler(Core::System& system_, s32 core_id_); ~KScheduler(); - void Finalize(); - /// Reschedules to the next available thread (call after current thread is suspended) void RescheduleCurrentCore(); diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index 93c47f1b1..c571f2992 100755 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h @@ -23,11 +23,6 @@ public: } void Lock() { - // If we are shutting down the kernel, none of this is relevant anymore. - if (kernel.IsShuttingDown()) { - return; - } - if (IsLockedByCurrentThread()) { // If we already own the lock, we can just increment the count. ASSERT(lock_count > 0); @@ -48,11 +43,6 @@ } void Unlock() { - // If we are shutting down the kernel, none of this is relevant anymore.
- if (kernel.IsShuttingDown()) { - return; - } - ASSERT(IsLockedByCurrentThread()); ASSERT(lock_count > 0); diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h index 2995c492d..61dc2858f 100755 --- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h +++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h @@ -8,7 +8,6 @@ #pragma once #include "common/common_types.h" -#include "core/hle/kernel/global_scheduler_context.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/time_manager.h" diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp index d4e4a6b06..2bd53ccbd 100755 --- a/src/core/hle/kernel/k_server_session.cpp +++ b/src/core/hle/kernel/k_server_session.cpp @@ -175,7 +175,8 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) { { KScopedSchedulerLock lock(kernel); if (!context.IsThreadWaiting()) { - context.GetThread().EndWait(result); + context.GetThread().Wakeup(); + context.GetThread().SetSyncedObject(nullptr, result); } } diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp index ffeb4b73f..f168b4f21 100755 --- a/src/core/hle/kernel/k_synchronization_object.cpp +++ b/src/core/hle/kernel/k_synchronization_object.cpp @@ -8,66 +8,11 @@ #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_thread.h" -#include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/svc_results.h" namespace Kernel { -namespace { - -class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait { -public: - ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o, - KSynchronizationObject::ThreadListNode* n, s32 c) - : KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {} - - virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object, - ResultCode wait_result) override { - // Determine the sync index, and unlink all nodes. - s32 sync_index = -1; - for (auto i = 0; i < m_count; ++i) { - // Check if this is the signaled object. - if (m_objects[i] == signaled_object && sync_index == -1) { - sync_index = i; - } - - // Unlink the current node from the current object. - m_objects[i]->UnlinkNode(std::addressof(m_nodes[i])); - } - - // Set the waiting thread's sync index. - waiting_thread->SetSyncedIndex(sync_index); - - // Set the waiting thread as not cancellable. - waiting_thread->ClearCancellable(); - - // Invoke the base end wait handler. - KThreadQueue::EndWait(waiting_thread, wait_result); - } - - virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result, - bool cancel_timer_task) override { - // Remove all nodes from our list. - for (auto i = 0; i < m_count; ++i) { - m_objects[i]->UnlinkNode(std::addressof(m_nodes[i])); - } - - // Set the waiting thread as not cancellable. - waiting_thread->ClearCancellable(); - - // Invoke the base cancel wait handler. 
- KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task); - } - -private: - KSynchronizationObject** m_objects; - KSynchronizationObject::ThreadListNode* m_nodes; - s32 m_count; -}; - -} // namespace - void KSynchronizationObject::Finalize() { this->OnFinalizeSynchronizationObject(); KAutoObject::Finalize(); @@ -80,19 +25,11 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index, std::vector thread_nodes(num_objects); // Prepare for wait. - KThread* thread = GetCurrentThreadPointer(kernel_ctx); - ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects, - thread_nodes.data(), num_objects); + KThread* thread = kernel_ctx.CurrentScheduler()->GetCurrentThread(); { // Setup the scheduling lock and sleep. - KScopedSchedulerLockAndSleep slp(kernel_ctx, thread, timeout); - - // Check if the thread should terminate. - if (thread->IsTerminationRequested()) { - slp.CancelSleep(); - return ResultTerminationRequested; - } + KScopedSchedulerLockAndSleep slp{kernel_ctx, thread, timeout}; // Check if any of the objects are already signaled. for (auto i = 0; i < num_objects; ++i) { @@ -111,6 +48,12 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index, return ResultTimedOut; } + // Check if the thread should terminate. + if (thread->IsTerminationRequested()) { + slp.CancelSleep(); + return ResultTerminationRequested; + } + // Check if waiting was canceled. if (thread->IsWaitCancelled()) { slp.CancelSleep(); @@ -123,25 +66,73 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index, thread_nodes[i].thread = thread; thread_nodes[i].next = nullptr; - objects[i]->LinkNode(std::addressof(thread_nodes[i])); + if (objects[i]->thread_list_tail == nullptr) { + objects[i]->thread_list_head = std::addressof(thread_nodes[i]); + } else { + objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]); + } + + objects[i]->thread_list_tail = std::addressof(thread_nodes[i]); } - // Mark the thread as cancellable. + // For debugging only + thread->SetWaitObjectsForDebugging({objects, static_cast(num_objects)}); + + // Mark the thread as waiting. thread->SetCancellable(); - - // Clear the thread's synced index. - thread->SetSyncedIndex(-1); - - // Wait for an object to be signaled. - thread->BeginWait(std::addressof(wait_queue)); + thread->SetSyncedObject(nullptr, ResultTimedOut); + thread->SetState(ThreadState::Waiting); thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization); } - // Set the output index. - *out_index = thread->GetSyncedIndex(); + // The lock/sleep is done, so we should be able to get our result. + + // Thread is no longer cancellable. + thread->ClearCancellable(); + + // For debugging only + thread->SetWaitObjectsForDebugging({}); + + // Cancel the timer as needed. + kernel_ctx.TimeManager().UnscheduleTimeEvent(thread); // Get the wait result. - return thread->GetWaitResult(); + ResultCode wait_result{ResultSuccess}; + s32 sync_index = -1; + { + KScopedSchedulerLock lock(kernel_ctx); + KSynchronizationObject* synced_obj; + wait_result = thread->GetWaitResult(std::addressof(synced_obj)); + + for (auto i = 0; i < num_objects; ++i) { + // Unlink the object from the list. 
+ ThreadListNode* prev_ptr = + reinterpret_cast(std::addressof(objects[i]->thread_list_head)); + ThreadListNode* prev_val = nullptr; + ThreadListNode *prev, *tail_prev; + + do { + prev = prev_ptr; + prev_ptr = prev_ptr->next; + tail_prev = prev_val; + prev_val = prev_ptr; + } while (prev_ptr != std::addressof(thread_nodes[i])); + + if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) { + objects[i]->thread_list_tail = tail_prev; + } + + prev->next = thread_nodes[i].next; + + if (objects[i] == synced_obj) { + sync_index = i; + } + } + } + + // Set output. + *out_index = sync_index; + return wait_result; } KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_) @@ -150,7 +141,7 @@ KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_) KSynchronizationObject::~KSynchronizationObject() = default; void KSynchronizationObject::NotifyAvailable(ResultCode result) { - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock lock(kernel); // If we're not signaled, we've nothing to notify. if (!this->IsSignaled()) { @@ -159,7 +150,11 @@ void KSynchronizationObject::NotifyAvailable(ResultCode result) { // Iterate over each thread. for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { - cur_node->thread->NotifyAvailable(this, result); + KThread* thread = cur_node->thread; + if (thread->GetState() == ThreadState::Waiting) { + thread->SetSyncedObject(this, result); + thread->SetState(ThreadState::Runnable); + } } } diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h index ec235437b..898e58e16 100755 --- a/src/core/hle/kernel/k_synchronization_object.h +++ b/src/core/hle/kernel/k_synchronization_object.h @@ -35,38 +35,6 @@ public: [[nodiscard]] std::vector GetWaitingThreadsForDebugging() const; - void LinkNode(ThreadListNode* node_) { - // Link the node to the list. - if (thread_list_tail == nullptr) { - thread_list_head = node_; - } else { - thread_list_tail->next = node_; - } - - thread_list_tail = node_; - } - - void UnlinkNode(ThreadListNode* node_) { - // Unlink the node from the list. 
- ThreadListNode* prev_ptr = - reinterpret_cast(std::addressof(thread_list_head)); - ThreadListNode* prev_val = nullptr; - ThreadListNode *prev, *tail_prev; - - do { - prev = prev_ptr; - prev_ptr = prev_ptr->next; - tail_prev = prev_val; - prev_val = prev_ptr; - } while (prev_ptr != node_); - - if (thread_list_tail == node_) { - thread_list_tail = tail_prev; - } - - prev->next = node_->next; - } - protected: explicit KSynchronizationObject(KernelCore& kernel); ~KSynchronizationObject() override; diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 03175e5c2..db65ce79a 100755 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -13,9 +13,6 @@ #include "common/common_types.h" #include "common/fiber.h" #include "common/logging/log.h" -#include "common/scope_exit.h" -#include "common/settings.h" -#include "common/thread_queue_list.h" #include "core/core.h" #include "core/cpu_manager.h" #include "core/hardware_properties.h" @@ -59,34 +56,6 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, namespace Kernel { -namespace { - -class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait { -public: - explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_) - : KThreadQueueWithoutEndWait(kernel_) {} -}; - -class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue { -public: - explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl) - : KThreadQueue(kernel_), m_wait_list(wl) {} - - virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result, - bool cancel_timer_task) override { - // Remove the thread from the wait list. - m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread)); - - // Invoke the base cancel wait handler. - KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task); - } - -private: - KThread::WaiterList* m_wait_list; -}; - -} // namespace - KThread::KThread(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {} KThread::~KThread() = default; @@ -113,8 +82,6 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s [[fallthrough]]; case ThreadType::HighPriority: [[fallthrough]]; - case ThreadType::Dummy: - [[fallthrough]]; case ThreadType::User: ASSERT(((owner == nullptr) || (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask())); @@ -160,8 +127,11 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s priority = prio; base_priority = prio; + // Set sync object and waiting lock to null. + synced_object = nullptr; + // Initialize sleeping queue. - wait_queue = nullptr; + sleeping_queue = nullptr; // Set suspend flags. suspend_request_flags = 0; @@ -214,7 +184,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s // Setup the stack parameters. StackParameters& sp = GetStackParameters(); sp.cur_thread = this; - sp.disable_count = 0; + sp.disable_count = 1; SetInExceptionHandler(); // Set thread ID. @@ -241,16 +211,15 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint // Initialize the thread. R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); - // Initialize emulation parameters. + // Initialize host context. 
     thread->host_context =
         std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
-    thread->is_single_core = !Settings::values.use_multi_core.GetValue();
 
     return ResultSuccess;
 }
 
 ResultCode KThread::InitializeDummyThread(KThread* thread) {
-    return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Dummy);
+    return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Main);
 }
 
 ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
@@ -304,14 +273,11 @@ void KThread::Finalize() {
         auto it = waiter_list.begin();
         while (it != waiter_list.end()) {
-            // Clear the lock owner
+            // The thread shouldn't be a kernel waiter.
             it->SetLockOwner(nullptr);
-
-            // Erase the waiter from our list.
+            it->SetSyncedObject(nullptr, ResultInvalidState);
+            it->Wakeup();
             it = waiter_list.erase(it);
-
-            // Cancel the thread's wait.
-            it->CancelWait(ResultInvalidState, true);
         }
     }
 
@@ -328,12 +294,15 @@ bool KThread::IsSignaled() const {
     return signaled;
 }
 
-void KThread::OnTimer() {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+void KThread::Wakeup() {
+    KScopedSchedulerLock sl{kernel};
 
-    // If we're waiting, cancel the wait.
     if (GetState() == ThreadState::Waiting) {
-        wait_queue->CancelWait(this, ResultTimedOut, false);
+        if (sleeping_queue != nullptr) {
+            sleeping_queue->WakeupThread(this);
+        } else {
+            SetState(ThreadState::Runnable);
+        }
     }
 }
 
@@ -358,7 +327,7 @@ void KThread::StartTermination() {
 
     // Signal.
     signaled = true;
-    KSynchronizationObject::NotifyAvailable();
+    NotifyAvailable();
 
     // Clear previous thread in KScheduler.
     KScheduler::ClearPreviousThread(kernel, this);
@@ -506,32 +475,30 @@ ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_m
     return ResultSuccess;
 }
 
-ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
+ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
     ASSERT(parent != nullptr);
     ASSERT(v_affinity_mask != 0);
-    KScopedLightLock lk(activity_pause_lock);
+    KScopedLightLock lk{activity_pause_lock};
 
     // Set the core mask.
     u64 p_affinity_mask = 0;
     {
-        KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl{kernel};
         ASSERT(num_core_migration_disables >= 0);
 
-        // If we're updating, set our ideal virtual core.
-        if (core_id_ != Svc::IdealCoreNoUpdate) {
-            virtual_ideal_core_id = core_id_;
-        } else {
-            // Preserve our ideal core id.
-            core_id_ = virtual_ideal_core_id;
-            R_UNLESS(((1ULL << core_id_) & v_affinity_mask) != 0, ResultInvalidCombination);
+        // If the core id is no-update magic, preserve the ideal core id.
+        if (cpu_core_id == Svc::IdealCoreNoUpdate) {
+            cpu_core_id = virtual_ideal_core_id;
+            R_UNLESS(((1ULL << cpu_core_id) & v_affinity_mask) != 0, ResultInvalidCombination);
        }
 
-        // Set our affinity mask.
+        // Set the virtual core/affinity mask.
+        virtual_ideal_core_id = cpu_core_id;
         virtual_affinity_mask = v_affinity_mask;
 
        // Translate the virtual core to a physical core.
-        if (core_id_ >= 0) {
-            core_id_ = Core::Hardware::VirtualToPhysicalCoreMap[core_id_];
+        if (cpu_core_id >= 0) {
+            cpu_core_id = Core::Hardware::VirtualToPhysicalCoreMap[cpu_core_id];
         }
 
         // Translate the virtual affinity mask to a physical one.
@@ -546,7 +513,7 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
             const KAffinityMask old_mask = physical_affinity_mask;
 
             // Set our new ideals.
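+            // (Updated together under the scheduler lock, so the scheduler never sees a partial change.)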
-            physical_ideal_core_id = core_id_;
+            physical_ideal_core_id = cpu_core_id;
             physical_affinity_mask.SetAffinityMask(p_affinity_mask);
 
             if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
@@ -564,18 +531,18 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
             }
         } else {
             // Otherwise, we edit the original affinity for restoration later.
-            original_physical_ideal_core_id = core_id_;
+            original_physical_ideal_core_id = cpu_core_id;
             original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
         }
     }
 
     // Update the pinned waiter list.
-    ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, std::addressof(pinned_waiter_list));
     {
         bool retry_update{};
+        bool thread_is_pinned{};
         do {
             // Lock the scheduler.
-            KScopedSchedulerLock sl(kernel);
+            KScopedSchedulerLock sl{kernel};
 
             // Don't do any further management if our termination has been requested.
             R_SUCCEED_IF(IsTerminationRequested());
@@ -603,9 +570,12 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
                 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
                          ResultTerminationRequested);
 
+                // Note that the thread was pinned.
+                thread_is_pinned = true;
+
                 // Wait until the thread isn't pinned any more.
                 pinned_waiter_list.push_back(GetCurrentThread(kernel));
-                GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
+                GetCurrentThread(kernel).SetState(ThreadState::Waiting);
             } else {
                 // If the thread isn't pinned, release the scheduler lock and retry until it's
                 // not current.
@@ -613,6 +583,16 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
             }
         } while (retry_update);
+
+        // If the thread was pinned, it no longer is, and we should remove the current thread from
+        // our waiter list.
+        if (thread_is_pinned) {
+            // Lock the scheduler.
+            KScopedSchedulerLock sl{kernel};
+
+            // Remove from the list.
+            pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
+        }
     }
 
     return ResultSuccess;
@@ -661,9 +641,15 @@ void KThread::WaitCancel() {
     KScopedSchedulerLock sl{kernel};
 
     // Check if we're waiting and cancellable.
-    if (this->GetState() == ThreadState::Waiting && cancellable) {
-        wait_cancelled = false;
-        wait_queue->CancelWait(this, ResultCancelled, true);
+    if (GetState() == ThreadState::Waiting && cancellable) {
+        if (sleeping_queue != nullptr) {
+            sleeping_queue->WakeupThread(this);
+            wait_cancelled = true;
+        } else {
+            SetSyncedObject(nullptr, ResultCancelled);
+            SetState(ThreadState::Runnable);
+            wait_cancelled = false;
+        }
     } else {
         // Otherwise, note that we cancelled a wait.
         wait_cancelled = true;
@@ -714,59 +700,60 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
     // Set the activity.
     {
         // Lock the scheduler.
-        KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl{kernel};
 
         // Verify our state.
-        const auto cur_state = this->GetState();
+        const auto cur_state = GetState();
         R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
                  ResultInvalidState);
 
         // Either pause or resume.
         if (activity == Svc::ThreadActivity::Paused) {
             // Verify that we're not suspended.
-            R_UNLESS(!this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
+            R_UNLESS(!IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
 
             // Suspend.
-            this->RequestSuspend(SuspendType::Thread);
+            RequestSuspend(SuspendType::Thread);
         } else {
             ASSERT(activity == Svc::ThreadActivity::Runnable);
 
             // Verify that we're suspended.
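+            // (Only a thread with a pending thread-suspend request can be resumed here.)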
-            R_UNLESS(this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
+            R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
 
             // Resume.
-            this->Resume(SuspendType::Thread);
+            Resume(SuspendType::Thread);
         }
     }
 
     // If the thread is now paused, update the pinned waiter list.
     if (activity == Svc::ThreadActivity::Paused) {
-        ThreadQueueImplForKThreadSetProperty wait_queue_(kernel,
-                                                         std::addressof(pinned_waiter_list));
-
-        bool thread_is_current;
+        bool thread_is_pinned{};
+        bool thread_is_current{};
         do {
             // Lock the scheduler.
-            KScopedSchedulerLock sl(kernel);
+            KScopedSchedulerLock sl{kernel};
 
             // Don't do any further management if our termination has been requested.
-            R_SUCCEED_IF(this->IsTerminationRequested());
-
-            // By default, treat the thread as not current.
-            thread_is_current = false;
+            R_SUCCEED_IF(IsTerminationRequested());
 
             // Check whether the thread is pinned.
-            if (this->GetStackParameters().is_pinned) {
+            if (GetStackParameters().is_pinned) {
                 // Verify that the current thread isn't terminating.
                 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
                          ResultTerminationRequested);
 
+                // Note that the thread was pinned and not current.
+                thread_is_pinned = true;
+                thread_is_current = false;
+
                 // Wait until the thread isn't pinned any more.
                 pinned_waiter_list.push_back(GetCurrentThread(kernel));
-                GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
+                GetCurrentThread(kernel).SetState(ThreadState::Waiting);
             } else {
                 // Check if the thread is currently running.
                 // If it is, we'll need to retry.
+                thread_is_current = false;
+
                 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
                     if (kernel.Scheduler(i).GetCurrentThread() == this) {
                         thread_is_current = true;
@@ -775,6 +762,16 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
                 }
             }
         } while (thread_is_current);
+
+        // If the thread was pinned, it no longer is, and we should remove the current thread from
+        // our waiter list.
+        if (thread_is_pinned) {
+            // Lock the scheduler.
+            KScopedSchedulerLock sl{kernel};
+
+            // Remove from the list.
+            pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
+        }
     }
 
     return ResultSuccess;
@@ -969,9 +966,6 @@ ResultCode KThread::Run() {
 
         // Set our state and finish.
         SetState(ThreadState::Runnable);
-
-        DisableDispatch();
-
         return ResultSuccess;
     }
 }
@@ -1002,63 +996,29 @@ ResultCode KThread::Sleep(s64 timeout) {
     ASSERT(this == GetCurrentThreadPointer(kernel));
     ASSERT(timeout > 0);
 
-    ThreadQueueImplForKThreadSleep wait_queue_(kernel);
     {
         // Setup the scheduling lock and sleep.
-        KScopedSchedulerLockAndSleep slp(kernel, this, timeout);
+        KScopedSchedulerLockAndSleep slp{kernel, this, timeout};
 
         // Check if the thread should terminate.
-        if (this->IsTerminationRequested()) {
+        if (IsTerminationRequested()) {
             slp.CancelSleep();
             return ResultTerminationRequested;
         }
 
-        // Wait for the sleep to end.
-        this->BeginWait(std::addressof(wait_queue_));
+        // Mark the thread as waiting.
+        SetState(ThreadState::Waiting);
         SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
     }
 
+    // The lock/sleep is done.
+
+    // Cancel the timer.
+    kernel.TimeManager().UnscheduleTimeEvent(this);
+
     return ResultSuccess;
 }
 
-void KThread::BeginWait(KThreadQueue* queue) {
-    // Set our state as waiting.
-    SetState(ThreadState::Waiting);
-
-    // Set our wait queue.
-    wait_queue = queue;
-}
-
-void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
-    // Lock the scheduler.
-    KScopedSchedulerLock sl(kernel);
-
-    // If we're waiting, notify our queue that we're available.
-    if (GetState() == ThreadState::Waiting) {
-        wait_queue->NotifyAvailable(this, signaled_object, wait_result_);
-    }
-}
-
-void KThread::EndWait(ResultCode wait_result_) {
-    // Lock the scheduler.
-    KScopedSchedulerLock sl(kernel);
-
-    // If we're waiting, notify our queue that we're available.
-    if (GetState() == ThreadState::Waiting) {
-        wait_queue->EndWait(this, wait_result_);
-    }
-}
-
-void KThread::CancelWait(ResultCode wait_result_, bool cancel_timer_task) {
-    // Lock the scheduler.
-    KScopedSchedulerLock sl(kernel);
-
-    // If we're waiting, notify our queue that we're available.
-    if (GetState() == ThreadState::Waiting) {
-        wait_queue->CancelWait(this, wait_result_, cancel_timer_task);
-    }
-}
-
 void KThread::SetState(ThreadState state) {
     KScopedSchedulerLock sl{kernel};
 
@@ -1090,26 +1050,4 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
     return GetCurrentThread(kernel).GetCurrentCore();
 }
 
-KScopedDisableDispatch::~KScopedDisableDispatch() {
-    // If we are shutting down the kernel, none of this is relevant anymore.
-    if (kernel.IsShuttingDown()) {
-        return;
-    }
-
-    // Skip the reschedule if single-core, as dispatch tracking is disabled here.
-    if (!Settings::values.use_multi_core.GetValue()) {
-        return;
-    }
-
-    if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
-        auto scheduler = kernel.CurrentScheduler();
-
-        if (scheduler) {
-            scheduler->RescheduleCurrentCore();
-        }
-    } else {
-        GetCurrentThread(kernel).EnableDispatch();
-    }
-}
-
 } // namespace Kernel
 
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index c8a08bd71..c77f44ad4 100755
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -48,7 +48,6 @@ enum class ThreadType : u32 {
     Kernel = 1,
     HighPriority = 2,
     User = 3,
-    Dummy = 100, // Special thread type for emulation purposes only
 };
 DECLARE_ENUM_FLAG_OPERATORS(ThreadType);
 
@@ -162,6 +161,8 @@ public:
         }
     }
 
+    void Wakeup();
+
     void SetBasePriority(s32 value);
 
     [[nodiscard]] ResultCode Run();
 
@@ -196,19 +197,13 @@ public:
 
     void Suspend();
 
-    constexpr void SetSyncedIndex(s32 index) {
-        synced_index = index;
-    }
-
-    [[nodiscard]] constexpr s32 GetSyncedIndex() const {
-        return synced_index;
-    }
-
-    constexpr void SetWaitResult(ResultCode wait_res) {
+    void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) {
+        synced_object = obj;
         wait_result = wait_res;
     }
 
-    [[nodiscard]] constexpr ResultCode GetWaitResult() const {
+    [[nodiscard]] ResultCode GetWaitResult(KSynchronizationObject** out) const {
+        *out = synced_object;
         return wait_result;
     }
 
@@ -379,8 +374,6 @@ public:
 
     [[nodiscard]] bool IsSignaled() const override;
 
-    void OnTimer();
-
     static void PostDestroy(uintptr_t arg);
 
     [[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);
@@ -453,39 +446,20 @@ public:
         return per_core_priority_queue_entry[core];
     }
 
-    [[nodiscard]] bool IsKernelThread() const {
-        return GetActiveCore() == 3;
-    }
-
-    [[nodiscard]] bool IsDispatchTrackingDisabled() const {
-        return is_single_core || IsKernelThread();
+    void SetSleepingQueue(KThreadQueue* q) {
+        sleeping_queue = q;
     }
 
     [[nodiscard]] s32 GetDisableDispatchCount() const {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return 1;
-        }
-
         return this->GetStackParameters().disable_count;
     }
 
     void DisableDispatch() {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return;
-        }
-
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
         this->GetStackParameters().disable_count++;
     }
 
     void EnableDispatch() {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return;
-        }
-
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
         this->GetStackParameters().disable_count--;
     }
@@ -599,15 +573,6 @@ public:
         address_key_value = val;
     }
 
-    void ClearWaitQueue() {
-        wait_queue = nullptr;
-    }
-
-    void BeginWait(KThreadQueue* queue);
-    void NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_);
-    void EndWait(ResultCode wait_result_);
-    void CancelWait(ResultCode wait_result_, bool cancel_timer_task);
-
     [[nodiscard]] bool HasWaiters() const {
         return !waiter_list.empty();
     }
@@ -702,6 +667,7 @@ private:
     KAffinityMask physical_affinity_mask{};
     u64 thread_id{};
     std::atomic<s64> cpu_time{};
+    KSynchronizationObject* synced_object{};
     VAddr address_key{};
     KProcess* parent{};
     VAddr kernel_stack_top{};
@@ -711,14 +677,13 @@ private:
     s64 schedule_count{};
     s64 last_scheduled_tick{};
     std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
-    KThreadQueue* wait_queue{};
+    KThreadQueue* sleeping_queue{};
     WaiterList waiter_list{};
     WaiterList pinned_waiter_list{};
     KThread* lock_owner{};
     u32 address_key_value{};
     u32 suspend_request_flags{};
     u32 suspend_allowed_flags{};
-    s32 synced_index{};
     ResultCode wait_result{ResultSuccess};
     s32 base_priority{};
     s32 physical_ideal_core_id{};
@@ -743,7 +708,6 @@ private:
 
     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};
-    bool is_single_core{};
 
     // For debugging
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;
@@ -788,20 +752,4 @@ public:
     }
 };
 
-class KScopedDisableDispatch {
-public:
-    [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
-        // If we are shutting down the kernel, none of this is relevant anymore.
-        if (kernel.IsShuttingDown()) {
-            return;
-        }
-        GetCurrentThread(kernel).DisableDispatch();
-    }
-
-    ~KScopedDisableDispatch();
-
-private:
-    KernelCore& kernel;
-};
-
 } // namespace Kernel
 
diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h
index 1f13cde83..35d471dc5 100755
--- a/src/core/hle/kernel/k_thread_queue.h
+++ b/src/core/hle/kernel/k_thread_queue.h
@@ -4,7 +4,6 @@
 
 #pragma once
 
-#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_thread.h"
 
 namespace Kernel {
@@ -12,24 +11,71 @@ namespace Kernel {
 class KThreadQueue {
 public:
     explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_} {}
-    virtual ~KThreadQueue() = default;
 
-    virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
-                                 ResultCode wait_result);
-    virtual void EndWait(KThread* waiting_thread, ResultCode wait_result);
-    virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
-                            bool cancel_timer_task);
+    bool IsEmpty() const {
+        return wait_list.empty();
+    }
+
+    KThread::WaiterList::iterator begin() {
+        return wait_list.begin();
+    }
+    KThread::WaiterList::iterator end() {
+        return wait_list.end();
+    }
+
+    bool SleepThread(KThread* t) {
+        KScopedSchedulerLock sl{kernel};
+
+        // If the thread needs terminating, don't enqueue it.
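+        // (Returning false lets the caller bail out instead of putting a dying thread to sleep.)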
+        if (t->IsTerminationRequested()) {
+            return false;
+        }
+
+        // Set the thread's queue and mark it as waiting.
+        t->SetSleepingQueue(this);
+        t->SetState(ThreadState::Waiting);
+
+        // Add the thread to the queue.
+        wait_list.push_back(*t);
+
+        return true;
+    }
+
+    void WakeupThread(KThread* t) {
+        KScopedSchedulerLock sl{kernel};
+
+        // Remove the thread from the queue.
+        wait_list.erase(wait_list.iterator_to(*t));
+
+        // Mark the thread as no longer sleeping.
+        t->SetState(ThreadState::Runnable);
+        t->SetSleepingQueue(nullptr);
+    }
+
+    KThread* WakeupFrontThread() {
+        KScopedSchedulerLock sl{kernel};
+
+        if (wait_list.empty()) {
+            return nullptr;
+        } else {
+            // Remove the thread from the queue.
+            auto it = wait_list.begin();
+            KThread* thread = std::addressof(*it);
+            wait_list.erase(it);
+
+            ASSERT(thread->GetState() == ThreadState::Waiting);
+
+            // Mark the thread as no longer sleeping.
+            thread->SetState(ThreadState::Runnable);
+            thread->SetSleepingQueue(nullptr);
+
+            return thread;
+        }
+    }
 
 private:
     KernelCore& kernel;
     KThread::WaiterList wait_list{};
 };
 
-class KThreadQueueWithoutEndWait : public KThreadQueue {
-public:
-    explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {}
-
-    virtual void EndWait(KThread* waiting_thread, ResultCode wait_result) override final;
-};
-
 } // namespace Kernel
 
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 2e4e4cb1c..45e86a677 100755
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -14,7 +14,6 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/microprofile.h"
-#include "common/scope_exit.h"
 #include "common/thread.h"
 #include "common/thread_worker.h"
 #include "core/arm/arm_interface.h"
@@ -84,16 +83,12 @@ struct KernelCore::Impl {
     }
 
     void InitializeCores() {
-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            cores[core_id].Initialize(current_process->Is64BitProcess());
-            system.Memory().SetCurrentPageTable(*current_process, core_id);
+        for (auto& core : cores) {
+            core.Initialize(current_process->Is64BitProcess());
         }
     }
 
     void Shutdown() {
-        is_shutting_down.store(true, std::memory_order_relaxed);
-        SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
-
         process_list.clear();
 
         // Close all open server ports.
@@ -128,6 +123,15 @@ struct KernelCore::Impl {
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;
 
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            if (suspend_threads[core_id]) {
+                suspend_threads[core_id]->Close();
+                suspend_threads[core_id] = nullptr;
+            }
+
+            schedulers[core_id].reset();
+        }
+
         cores.clear();
 
         global_handle_table->Finalize();
@@ -155,16 +159,6 @@ struct KernelCore::Impl {
         CleanupObject(time_shared_mem);
         CleanupObject(system_resource_limit);
 
-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            if (suspend_threads[core_id]) {
-                suspend_threads[core_id]->Close();
-                suspend_threads[core_id] = nullptr;
-            }
-
-            schedulers[core_id]->Finalize();
-            schedulers[core_id].reset();
-        }
-
         // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
         next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
 
@@ -251,11 +245,13 @@ struct KernelCore::Impl {
                 KScopedSchedulerLock lock(kernel);
                 global_scheduler_context->PreemptThreads();
             }
-            const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
+            const auto time_interval = std::chrono::nanoseconds{
+                Core::Timing::msToCycles(std::chrono::milliseconds(10))};
             system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
         });
 
-        const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
+        const auto time_interval =
+            std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))};
         system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
     }
 
@@ -271,6 +267,14 @@ struct KernelCore::Impl {
 
     void MakeCurrentProcess(KProcess* process) {
         current_process = process;
+        if (process == nullptr) {
+            return;
+        }
+
+        const u32 core_id = GetCurrentHostThreadID();
+        if (core_id < Core::Hardware::NUM_CPU_CORES) {
+            system.Memory().SetCurrentPageTable(*process, core_id);
+        }
     }
 
     static inline thread_local u32 host_thread_id = UINT32_MAX;
@@ -340,16 +344,7 @@ struct KernelCore::Impl {
         is_phantom_mode_for_singlecore = value;
     }
 
-    bool IsShuttingDown() const {
-        return is_shutting_down.load(std::memory_order_relaxed);
-    }
-
     KThread* GetCurrentEmuThread() {
-        // If we are shutting down the kernel, none of this is relevant anymore.
-        if (IsShuttingDown()) {
-            return {};
-        }
-
         const auto thread_id = GetCurrentHostThreadID();
         if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
             return GetHostDummyThread();
@@ -765,7 +760,6 @@ struct KernelCore::Impl {
     std::vector<std::unique_ptr<KThread>> dummy_threads;
 
     bool is_multicore{};
-    std::atomic_bool is_shutting_down{};
     bool is_phantom_mode_for_singlecore{};
     u32 single_core_thread_id{};
 
@@ -851,20 +845,16 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
     return impl->cores[id];
 }
 
-size_t KernelCore::CurrentPhysicalCoreIndex() const {
-    const u32 core_id = impl->GetCurrentHostThreadID();
-    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
-        return Core::Hardware::NUM_CPU_CORES - 1;
-    }
-    return core_id;
-}
-
 Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
-    return impl->cores[CurrentPhysicalCoreIndex()];
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return impl->cores[core_id];
 }
 
 const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
-    return impl->cores[CurrentPhysicalCoreIndex()];
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return impl->cores[core_id];
 }
 
 Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -1067,9 +1057,6 @@ void KernelCore::Suspend(bool in_suspention) {
             impl->suspend_threads[core_id]->SetState(state);
             impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                 ThreadWaitReasonForDebugging::Suspended);
-            if (!should_suspend) {
-                impl->suspend_threads[core_id]->DisableDispatch();
-            }
         }
     }
 }
@@ -1078,21 +1065,19 @@ bool KernelCore::IsMulticore() const {
     return impl->is_multicore;
 }
 
-bool KernelCore::IsShuttingDown() const {
-    return impl->IsShuttingDown();
-}
-
 void KernelCore::ExceptionalExit() {
     exception_exited = true;
     Suspend(true);
 }
 
 void KernelCore::EnterSVCProfile() {
-    impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+    std::size_t core = impl->GetCurrentHostThreadID();
+    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
 }
 
 void KernelCore::ExitSVCProfile() {
-    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
+    std::size_t core = impl->GetCurrentHostThreadID();
+    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
 }
 
 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index eacf9dc61..d2ceae950 100755
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -148,9 +148,6 @@ public:
     /// Gets an instance of the respective physical CPU core.
     const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
 
-    /// Gets the current physical core index for the running host thread.
-    std::size_t CurrentPhysicalCoreIndex() const;
-
     /// Gets the sole instance of the Scheduler at the current running core.
     Kernel::KScheduler* CurrentScheduler();
@@ -274,8 +271,6 @@ public:
 
     bool IsMulticore() const;
 
-    bool IsShuttingDown() const;
-
     void EnterSVCProfile();
 
     void ExitSVCProfile();
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp
index 00657bc4c..6721b6276 100755
--- a/src/core/hle/kernel/service_thread.cpp
+++ b/src/core/hle/kernel/service_thread.cpp
@@ -25,27 +25,24 @@ public:
     void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);
 
 private:
-    std::vector<std::jthread> threads;
+    std::vector<std::thread> threads;
     std::queue<std::function<void()>> requests;
     std::mutex queue_mutex;
-    std::condition_variable_any condition;
+    std::condition_variable condition;
     const std::string service_name;
+    bool stop{};
 };
 
 ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name)
     : service_name{name} {
-    for (std::size_t i = 0; i < num_threads; ++i) {
-        threads.emplace_back([this, &kernel](std::stop_token stop_token) {
+    for (std::size_t i = 0; i < num_threads; ++i)
+        threads.emplace_back([this, &kernel] {
            Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str());
 
             // Wait for first request before trying to acquire a render context
             {
                 std::unique_lock lock{queue_mutex};
-                condition.wait(lock, stop_token, [this] { return !requests.empty(); });
-            }
-
-            if (stop_token.stop_requested()) {
-                return;
+                condition.wait(lock, [this] { return stop || !requests.empty(); });
             }
 
             kernel.RegisterHostThread();
@@ -55,16 +52,10 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
 
                 {
                     std::unique_lock lock{queue_mutex};
-                    condition.wait(lock, stop_token, [this] { return !requests.empty(); });
-
-                    if (stop_token.stop_requested()) {
+                    condition.wait(lock, [this] { return stop || !requests.empty(); });
+                    if (stop || requests.empty()) {
                         return;
                     }
-
-                    if (requests.empty()) {
-                        continue;
-                    }
-
                     task = std::move(requests.front());
                     requests.pop();
                 }
@@ -72,7 +63,6 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
                 task();
             }
         });
-    }
 }
 
 void ServiceThread::Impl::QueueSyncRequest(KSession& session,
@@ -97,7 +87,16 @@ void ServiceThread::Impl::QueueSyncRequest(KSession& session,
     condition.notify_one();
 }
 
-ServiceThread::Impl::~Impl() = default;
+ServiceThread::Impl::~Impl() {
+    {
+        std::unique_lock lock{queue_mutex};
+        stop = true;
+    }
+    condition.notify_all();
+    for (std::thread& thread : threads) {
+        thread.join();
+    }
+}
 
 ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name)
     : impl{std::make_unique<Impl>(kernel, num_threads, name)} {}
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 359cf515d..f0cd8471e 100755
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -31,7 +31,6 @@
 #include "core/hle/kernel/k_shared_memory.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/k_transfer_memory.h"
 #include "core/hle/kernel/k_writable_event.h"
 #include "core/hle/kernel/kernel.h"
@@ -308,29 +307,26 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
 
 /// Makes a blocking IPC call to an OS service.
 static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
+    auto& kernel = system.Kernel();
 
-    // Create the wait queue.
-    KThreadQueue wait_queue(kernel);
-
-    // Get the client session from its handle.
-    KScopedAutoObject session =
-        kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
-    R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
-
-    LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
-
     auto thread = kernel.CurrentScheduler()->GetCurrentThread();
     {
         KScopedSchedulerLock lock(kernel);
+        thread->SetState(ThreadState::Waiting);
+        thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
 
-        // This is a synchronous request, so we should wait for our request to complete.
-        GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue));
-        GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
-        session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
+        {
+            KScopedAutoObject session =
+                kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
+            R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
+            LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
+            session->SendSyncRequest(thread, system.Memory(), system.CoreTiming());
+        }
     }
 
-    return thread->GetWaitResult();
+    KSynchronizationObject* dummy{};
+    return thread->GetWaitResult(std::addressof(dummy));
 }
 
 static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
@@ -877,7 +873,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
             const u64 thread_ticks = current_thread->GetCpuTime();
 
             out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
-        } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
+        } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
             out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
         }
 
@@ -891,8 +887,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
             return ResultInvalidHandle;
         }
 
-        if (info_sub_id != 0xFFFFFFFFFFFFFFFF &&
-            info_sub_id != system.Kernel().CurrentPhysicalCoreIndex()) {
+        if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id != system.CurrentCoreIndex()) {
             LOG_ERROR(Kernel_SVC, "Core is not the current core, got {}", info_sub_id);
             return ResultInvalidCombination;
         }
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index aa985d820..8cd7279a3 100755
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -5,7 +5,6 @@
 #include "common/assert.h"
 #include "core/core.h"
 #include "core/core_timing.h"
-#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/time_manager.h"
 
@@ -16,10 +15,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
         Core::Timing::CreateEvent("Kernel::TimeManagerCallback",
                                   [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
                                       KThread* thread = reinterpret_cast<KThread*>(thread_handle);
-                                      {
-                                          KScopedSchedulerLock sl(system.Kernel());
-                                          thread->OnTimer();
-                                      }
+                                      thread->Wakeup();
                                   });
 }
 
diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp
index 68c9240ae..3c36f4085 100755
--- a/src/core/hle/service/friend/friend.cpp
+++ b/src/core/hle/service/friend/friend.cpp
@@ -17,10 +17,11 @@ namespace Service::Friend {
 
 class IFriendService final : public ServiceFramework<IFriendService> {
 public:
-    explicit IFriendService(Core::System& system_) : ServiceFramework{system_, "IFriendService"} {
+    explicit IFriendService(Core::System& system_)
+        : ServiceFramework{system_, "IFriendService"}, service_context{system_, "IFriendService"} {
         // clang-format off
         static const FunctionInfo functions[] = {
-            {0, nullptr, "GetCompletionEvent"},
+            {0, &IFriendService::GetCompletionEvent, "GetCompletionEvent"},
             {1, nullptr, "Cancel"},
             {10100, nullptr, "GetFriendListIds"},
             {10101, &IFriendService::GetFriendList, "GetFriendList"},
@@ -109,6 +110,12 @@ public:
         // clang-format on
 
         RegisterHandlers(functions);
+
+        completion_event = service_context.CreateEvent("IFriendService:CompletionEvent");
+    }
+
+    ~IFriendService() override {
+        service_context.CloseEvent(completion_event);
     }
 
 private:
@@ -129,6 +136,14 @@ private:
     };
     static_assert(sizeof(SizedFriendFilter) == 0x10, "SizedFriendFilter is an invalid size");
 
+    void GetCompletionEvent(Kernel::HLERequestContext& ctx) {
+        LOG_DEBUG(Service_Friend, "called");
+
+        IPC::ResponseBuilder rb{ctx, 2, 1};
+        rb.Push(ResultSuccess);
+        rb.PushCopyObjects(completion_event->GetReadableEvent());
+    }
+
     void GetBlockedUserListIds(Kernel::HLERequestContext& ctx) {
         // This is safe to stub, as there should be no adverse consequences from reporting no
         // blocked users.
@@ -179,6 +194,10 @@ private:
         rb.Push(0); // Friend count
         // TODO(ogniK): Return a buffer of u64s which are the "NetworkServiceAccountId"
     }
+
+    KernelHelpers::ServiceContext service_context;
+
+    Kernel::KEvent* completion_event;
 };
 
 class INotificationService final : public ServiceFramework<INotificationService> {