early-access version 2032
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 2031.
+This is the source code for early-access 2032.
 
 ## Legal Notice
 

@@ -507,12 +507,6 @@ const ARM_Interface& System::CurrentArmInterface() const {
     return impl->kernel.CurrentPhysicalCore().ArmInterface();
 }
 
-std::size_t System::CurrentCoreIndex() const {
-    std::size_t core = impl->kernel.GetCurrentHostThreadID();
-    ASSERT(core < Core::Hardware::NUM_CPU_CORES);
-    return core;
-}
-
 Kernel::PhysicalCore& System::CurrentPhysicalCore() {
     return impl->kernel.CurrentPhysicalCore();
 }

@@ -205,9 +205,6 @@ public:
     /// Gets an ARM interface to the CPU core that is currently running
     [[nodiscard]] const ARM_Interface& CurrentArmInterface() const;
 
-    /// Gets the index of the currently running CPU core
-    [[nodiscard]] std::size_t CurrentCoreIndex() const;
-
     /// Gets the physical core for the CPU core that is currently running
     [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore();
 

@@ -21,34 +21,25 @@ namespace Core {
 CpuManager::CpuManager(System& system_) : system{system_} {}
 CpuManager::~CpuManager() = default;
 
-void CpuManager::ThreadStart(CpuManager& cpu_manager, std::size_t core) {
-    cpu_manager.RunThread(core);
+void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager,
+                             std::size_t core) {
+    cpu_manager.RunThread(stop_token, core);
 }
 
 void CpuManager::Initialize() {
     running_mode = true;
     if (is_multicore) {
         for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
-            core_data[core].host_thread =
-                std::make_unique<std::thread>(ThreadStart, std::ref(*this), core);
+            core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
         }
     } else {
-        core_data[0].host_thread = std::make_unique<std::thread>(ThreadStart, std::ref(*this), 0);
+        core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), 0);
     }
 }
 
 void CpuManager::Shutdown() {
     running_mode = false;
     Pause(false);
-    if (is_multicore) {
-        for (auto& data : core_data) {
-            data.host_thread->join();
-            data.host_thread.reset();
-        }
-    } else {
-        core_data[0].host_thread->join();
-        core_data[0].host_thread.reset();
-    }
 }
 
 std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {
@@ -127,17 +118,18 @@ void CpuManager::MultiCoreRunGuestLoop() {
             physical_core = &kernel.CurrentPhysicalCore();
         }
         system.ExitDynarmicProfile();
-        physical_core->ArmInterface().ClearExclusiveState();
-        kernel.CurrentScheduler()->RescheduleCurrentCore();
+        {
+            Kernel::KScopedDisableDispatch dd(kernel);
+            physical_core->ArmInterface().ClearExclusiveState();
+        }
     }
 }
 
 void CpuManager::MultiCoreRunIdleThread() {
     auto& kernel = system.Kernel();
     while (true) {
-        auto& physical_core = kernel.CurrentPhysicalCore();
-        physical_core.Idle();
-        kernel.CurrentScheduler()->RescheduleCurrentCore();
+        Kernel::KScopedDisableDispatch dd(kernel);
+        kernel.CurrentPhysicalCore().Idle();
     }
 }
 
@@ -145,12 +137,12 @@ void CpuManager::MultiCoreRunSuspendThread() {
     auto& kernel = system.Kernel();
     kernel.CurrentScheduler()->OnThreadStart();
     while (true) {
-        auto core = kernel.GetCurrentHostThreadID();
+        auto core = kernel.CurrentPhysicalCoreIndex();
         auto& scheduler = *kernel.CurrentScheduler();
         Kernel::KThread* current_thread = scheduler.GetCurrentThread();
         Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
         ASSERT(scheduler.ContextSwitchPending());
-        ASSERT(core == kernel.GetCurrentHostThreadID());
+        ASSERT(core == kernel.CurrentPhysicalCoreIndex());
         scheduler.RescheduleCurrentCore();
     }
 }
@@ -317,7 +309,7 @@ void CpuManager::Pause(bool paused) {
     }
 }
 
-void CpuManager::RunThread(std::size_t core) {
+void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
     /// Initialization
     system.RegisterCoreThread(core);
     std::string name;
@@ -356,8 +348,8 @@ void CpuManager::RunThread(std::size_t core) {
             sc_sync_first_use = false;
         }
 
-        // Abort if emulation was killed before the session really starts
-        if (!system.IsPoweredOn()) {
+        // Emulation was stopped
+        if (stop_token.stop_requested()) {
             return;
         }
 

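The std::jthread migration above is what lets CpuManager::Shutdown drop its manual join loop: a std::jthread requests stop and joins in its own destructor, and when the thread callable's first parameter is a std::stop_token, the jthread supplies one automatically. A minimal standalone sketch of those two behaviors (illustrative only, not yuzu code):

#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

int main() {
    std::atomic<int> iterations{0};
    {
        // As with CpuManager::ThreadStart, the leading std::stop_token
        // parameter is filled in by std::jthread itself.
        std::jthread worker([&iterations](std::stop_token stop_token) {
            while (!stop_token.stop_requested()) {
                ++iterations;
                std::this_thread::sleep_for(std::chrono::milliseconds(1));
            }
        });
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
    } // ~jthread() calls request_stop() and then join(); no manual cleanup.
    std::cout << "worker ran " << iterations << " iterations\n";
}
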
@@ -78,9 +78,9 @@ private:
     void SingleCoreRunSuspendThread();
     void SingleCorePause(bool paused);
 
-    static void ThreadStart(CpuManager& cpu_manager, std::size_t core);
+    static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core);
 
-    void RunThread(std::size_t core);
+    void RunThread(std::stop_token stop_token, std::size_t core);
 
     struct CoreData {
         std::shared_ptr<Common::Fiber> host_context;
@@ -89,7 +89,7 @@ private:
         std::atomic<bool> is_running;
         std::atomic<bool> is_paused;
         std::atomic<bool> initialized;
-        std::unique_ptr<std::thread> host_thread;
+        std::jthread host_thread;
     };
 
     std::atomic<bool> running_mode{};

@@ -28,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
 
 bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -58,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
 
 bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.

@@ -170,6 +170,10 @@ public:
         }
     }
 
+    const std::string& GetName() const {
+        return name;
+    }
+
 private:
     void RegisterWithKernel();
     void UnregisterWithKernel();

@@ -35,7 +35,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
 bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
                       u32 new_orr_mask) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // Load the value from the address.
     const auto expected = monitor.ExclusiveRead32(current_core, address);

@@ -13,6 +13,7 @@ ResultCode KHandleTable::Finalize() {
     // Get the table and clear our record of it.
     u16 saved_table_size = 0;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         std::swap(m_table_size, saved_table_size);
@@ -43,6 +44,7 @@ bool KHandleTable::Remove(Handle handle) {
     // Find the object and free the entry.
     KAutoObject* obj = nullptr;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if (this->IsValidHandle(handle)) {
@@ -61,6 +63,7 @@ bool KHandleTable::Remove(Handle handle) {
 }
 
 ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -83,6 +86,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
 }
 
 ResultCode KHandleTable::Reserve(Handle* out_handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -93,6 +97,7 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
 }
 
 void KHandleTable::Unreserve(Handle handle) {
+    KScopedDisableDispatch dd(kernel);
    KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
@@ -111,6 +116,7 @@ void KHandleTable::Unreserve(Handle handle) {
 }
 
 void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.

@@ -69,6 +69,7 @@ public:
     template <typename T = KAutoObject>
     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
         // Lock and look up in table.
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if constexpr (std::is_same_v<T, KAutoObject>) {
@@ -123,6 +124,7 @@ public:
         size_t num_opened;
         {
             // Lock the table.
+            KScopedDisableDispatch dd(kernel);
             KScopedSpinLock lk(m_lock);
             for (num_opened = 0; num_opened < num_handles; num_opened++) {
                 // Get the current handle.

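In each KHandleTable path above, KScopedDisableDispatch is declared immediately before KScopedSpinLock. The order is load-bearing: C++ destroys locals in reverse declaration order, so the spinlock is released before dispatch is re-enabled, and any reschedule triggered by re-enabling dispatch can never run while the table lock is still held. A toy demonstration of that ordering, using stand-in types rather than the kernel's:

#include <iostream>

struct ScopedDisableDispatch {
    ScopedDisableDispatch() { std::cout << "dispatch disabled\n"; }
    ~ScopedDisableDispatch() { std::cout << "dispatch enabled (reschedule may happen here)\n"; }
};

struct ScopedSpinLock {
    ScopedSpinLock() { std::cout << "lock acquired\n"; }
    ~ScopedSpinLock() { std::cout << "lock released\n"; }
};

int main() {
    ScopedDisableDispatch dd; // declared first, destroyed last
    ScopedSpinLock lk;        // declared second, destroyed first
}   // scope exit prints "lock released", then
    // "dispatch enabled (reschedule may happen here)"
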
@@ -59,6 +59,7 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
     thread->GetContext64().cpu_registers[1] = thread_handle;
+    thread->DisableDispatch();
 
     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
 

@@ -376,20 +376,18 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
 }
 
 void KScheduler::DisableScheduling(KernelCore& kernel) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
-        scheduler->GetCurrentThread()->DisableDispatch();
-    }
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
+    GetCurrentThreadPointer(kernel)->DisableDispatch();
 }
 
 void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
-        if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
-            scheduler->GetCurrentThread()->EnableDispatch();
-        }
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1);
+
+    if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) {
+        GetCurrentThreadPointer(kernel)->EnableDispatch();
+    } else {
+        RescheduleCores(kernel, cores_needing_scheduling);
     }
-    RescheduleCores(kernel, cores_needing_scheduling);
 }
 
 u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -617,13 +615,17 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
     state.highest_priority_thread = nullptr;
 }
 
-KScheduler::~KScheduler() {
+void KScheduler::Finalize() {
     if (idle_thread) {
         idle_thread->Close();
         idle_thread = nullptr;
     }
 }
 
+KScheduler::~KScheduler() {
+    ASSERT(!idle_thread);
+}
+
 KThread* KScheduler::GetCurrentThread() const {
     if (auto result = current_thread.load(); result) {
         return result;
@@ -642,10 +644,12 @@ void KScheduler::RescheduleCurrentCore() {
     if (phys_core.IsInterrupted()) {
         phys_core.ClearInterrupt();
     }
+
     guard.Lock();
     if (state.needs_scheduling.load()) {
         Schedule();
     } else {
+        GetCurrentThread()->EnableDispatch();
         guard.Unlock();
     }
 }
@@ -655,26 +659,33 @@ void KScheduler::OnThreadStart() {
 }
 
 void KScheduler::Unload(KThread* thread) {
+    ASSERT(thread);
+
     LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
 
-    if (thread) {
-        if (thread->IsCallingSvc()) {
-            thread->ClearIsCallingSvc();
-        }
-        if (!thread->IsTerminationRequested()) {
-            prev_thread = thread;
-
-            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
-            cpu_core.SaveContext(thread->GetContext32());
-            cpu_core.SaveContext(thread->GetContext64());
-            // Save the TPIDR_EL0 system register in case it was modified.
-            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
-            cpu_core.ClearExclusiveState();
-        } else {
-            prev_thread = nullptr;
-        }
-        thread->context_guard.Unlock();
+    if (thread->IsCallingSvc()) {
+        thread->ClearIsCallingSvc();
     }
+
+    auto& physical_core = system.Kernel().PhysicalCore(core_id);
+    if (!physical_core.IsInitialized()) {
+        return;
+    }
+
+    Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
+    cpu_core.SaveContext(thread->GetContext32());
+    cpu_core.SaveContext(thread->GetContext64());
+    // Save the TPIDR_EL0 system register in case it was modified.
+    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+    cpu_core.ClearExclusiveState();
+
+    if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
+        prev_thread = thread;
+    } else {
+        prev_thread = nullptr;
+    }
+
+    thread->context_guard.Unlock();
 }
 
 void KScheduler::Reload(KThread* thread) {
@@ -683,11 +694,6 @@ void KScheduler::Reload(KThread* thread) {
     if (thread) {
         ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
 
-        auto* const thread_owner_process = thread->GetOwnerProcess();
-        if (thread_owner_process != nullptr) {
-            system.Kernel().MakeCurrentProcess(thread_owner_process);
-        }
-
         Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
         cpu_core.LoadContext(thread->GetContext32());
         cpu_core.LoadContext(thread->GetContext64());
@@ -705,7 +711,7 @@ void KScheduler::SwitchContextStep2() {
 }
 
 void KScheduler::ScheduleImpl() {
-    KThread* previous_thread = current_thread.load();
+    KThread* previous_thread = GetCurrentThread();
     KThread* next_thread = state.highest_priority_thread;
 
     state.needs_scheduling = false;
@@ -717,10 +723,15 @@ void KScheduler::ScheduleImpl() {
 
     // If we're not actually switching thread, there's nothing to do.
     if (next_thread == current_thread.load()) {
+        previous_thread->EnableDispatch();
         guard.Unlock();
         return;
     }
 
+    if (next_thread->GetCurrentCore() != core_id) {
+        next_thread->SetCurrentCore(core_id);
+    }
+
     current_thread.store(next_thread);
 
     KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -731,11 +742,7 @@ void KScheduler::ScheduleImpl() {
     Unload(previous_thread);
 
     std::shared_ptr<Common::Fiber>* old_context;
-    if (previous_thread != nullptr) {
-        old_context = &previous_thread->GetHostContext();
-    } else {
-        old_context = &idle_thread->GetHostContext();
-    }
+    old_context = &previous_thread->GetHostContext();
     guard.Unlock();
 
     Common::Fiber::YieldTo(*old_context, *switch_fiber);

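The reworked EnableScheduling above makes dispatch disabling a counted, nestable operation: inner Enable calls only unwind one level, and only the outermost one actually triggers a reschedule. A small sketch of that counting pattern (simplified; in the real code the outermost path hands off to RescheduleCores and the context switch, rather than zeroing a counter directly):

#include <cassert>
#include <iostream>

class DispatchGate {
public:
    void Disable() {
        ++disable_count;
    }

    void Enable() {
        assert(disable_count >= 1);
        if (disable_count > 1) {
            --disable_count; // still nested: just unwind one level
        } else {
            disable_count = 0;
            std::cout << "rescheduling cores\n"; // outermost enable reschedules
        }
    }

private:
    int disable_count = 0;
};

int main() {
    DispatchGate gate;
    gate.Disable();
    gate.Disable(); // nested disable
    gate.Enable();  // inner: no reschedule
    gate.Enable();  // outermost: prints "rescheduling cores"
}
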
@@ -33,6 +33,8 @@ public:
     explicit KScheduler(Core::System& system_, s32 core_id_);
     ~KScheduler();
 
+    void Finalize();
+
     /// Reschedules to the next available thread (call after current thread is suspended)
     void RescheduleCurrentCore();
 

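Splitting Finalize() out of the destructor gives the kernel explicit control over teardown order: the idle thread is closed while the rest of the kernel still exists, and the destructor merely asserts that this already happened. A compact sketch of the same two-phase pattern, with hypothetical stand-in types:

#include <cassert>

struct IdleThread {};

class Scheduler {
public:
    void Finalize() {
        delete idle_thread;
        idle_thread = nullptr;
    }

    ~Scheduler() {
        // By the time the destructor runs, Finalize() must already have run.
        assert(idle_thread == nullptr);
    }

private:
    IdleThread* idle_thread = new IdleThread;
};

int main() {
    Scheduler scheduler;
    scheduler.Finalize(); // explicit, ordered teardown while dependencies are alive
}
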
@@ -14,6 +14,7 @@
 #include "common/fiber.h"
 #include "common/logging/log.h"
 #include "common/scope_exit.h"
+#include "common/settings.h"
 #include "common/thread_queue_list.h"
 #include "core/core.h"
 #include "core/cpu_manager.h"
@@ -188,7 +189,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     // Setup the stack parameters.
     StackParameters& sp = GetStackParameters();
     sp.cur_thread = this;
-    sp.disable_count = 1;
+    sp.disable_count = 0;
     SetInExceptionHandler();
 
     // Set thread ID.
@@ -215,9 +216,10 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
     // Initialize the thread.
     R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
 
-    // Initialize host context.
+    // Initialize emulation parameters.
     thread->host_context =
         std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
+    thread->is_single_core = !Settings::values.use_multi_core.GetValue();
 
     return ResultSuccess;
 }
@@ -970,6 +972,9 @@ ResultCode KThread::Run() {
 
         // Set our state and finish.
         SetState(ThreadState::Runnable);
+
+        DisableDispatch();
+
         return ResultSuccess;
     }
 }
@@ -1054,4 +1059,16 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
     return GetCurrentThread(kernel).GetCurrentCore();
 }
 
+KScopedDisableDispatch::~KScopedDisableDispatch() {
+    if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
+        auto scheduler = kernel.CurrentScheduler();
+
+        if (scheduler) {
+            scheduler->RescheduleCurrentCore();
+        }
+    } else {
+        GetCurrentThread(kernel).EnableDispatch();
+    }
+}
+
 } // namespace Kernel

@@ -450,16 +450,39 @@ public:
         sleeping_queue = q;
     }
 
+    [[nodiscard]] bool IsKernelThread() const {
+        return GetActiveCore() == 3;
+    }
+
+    [[nodiscard]] bool IsDispatchTrackingDisabled() const {
+        return is_single_core || IsKernelThread();
+    }
+
     [[nodiscard]] s32 GetDisableDispatchCount() const {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return 1;
+        }
+
         return this->GetStackParameters().disable_count;
     }
 
     void DisableDispatch() {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return;
+        }
+
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
         this->GetStackParameters().disable_count++;
     }
 
     void EnableDispatch() {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return;
+        }
+
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
         this->GetStackParameters().disable_count--;
     }
@@ -708,6 +731,7 @@ private:
 
     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};
+    bool is_single_core{};
 
     // For debugging
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;
@@ -752,4 +776,16 @@ public:
     }
 };
 
+class KScopedDisableDispatch {
+public:
+    [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
+        GetCurrentThread(kernel).DisableDispatch();
+    }
+
+    ~KScopedDisableDispatch();
+
+private:
+    KernelCore& kernel;
+};
+
 } // namespace Kernel

@@ -85,8 +85,9 @@ struct KernelCore::Impl {
     }
 
     void InitializeCores() {
-        for (auto& core : cores) {
-            core.Initialize(current_process->Is64BitProcess());
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            cores[core_id].Initialize(current_process->Is64BitProcess());
+            system.Memory().SetCurrentPageTable(*current_process, core_id);
         }
     }
 
@@ -131,15 +132,6 @@ struct KernelCore::Impl {
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;
 
-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            if (suspend_threads[core_id]) {
-                suspend_threads[core_id]->Close();
-                suspend_threads[core_id] = nullptr;
-            }
-
-            schedulers[core_id].reset();
-        }
-
         cores.clear();
 
         global_handle_table->Finalize();
@@ -167,6 +159,16 @@ struct KernelCore::Impl {
         CleanupObject(time_shared_mem);
         CleanupObject(system_resource_limit);
 
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            if (suspend_threads[core_id]) {
+                suspend_threads[core_id]->Close();
+                suspend_threads[core_id] = nullptr;
+            }
+
+            schedulers[core_id]->Finalize();
+            schedulers[core_id].reset();
+        }
+
         // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
         next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
@@ -257,14 +259,6 @@ struct KernelCore::Impl {
 
     void MakeCurrentProcess(KProcess* process) {
         current_process = process;
-        if (process == nullptr) {
-            return;
-        }
-
-        const u32 core_id = GetCurrentHostThreadID();
-        if (core_id < Core::Hardware::NUM_CPU_CORES) {
-            system.Memory().SetCurrentPageTable(*process, core_id);
-        }
     }
 
     static inline thread_local u32 host_thread_id = UINT32_MAX;
@@ -827,16 +821,20 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
     return impl->cores[id];
 }
 
+size_t KernelCore::CurrentPhysicalCoreIndex() const {
+    const u32 core_id = impl->GetCurrentHostThreadID();
+    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
+        return Core::Hardware::NUM_CPU_CORES - 1;
+    }
+    return core_id;
+}
+
 Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }
 
 const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }
 
 Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -1029,6 +1027,9 @@ void KernelCore::Suspend(bool in_suspention) {
             impl->suspend_threads[core_id]->SetState(state);
             impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                 ThreadWaitReasonForDebugging::Suspended);
+            if (!should_suspend) {
+                impl->suspend_threads[core_id]->DisableDispatch();
+            }
         }
     }
 }
@@ -1043,13 +1044,11 @@ void KernelCore::ExceptionalExit() {
 }
 
 void KernelCore::EnterSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+    impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
 }
 
 void KernelCore::ExitSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
+    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
 }
 
 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {

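The new KernelCore::CurrentPhysicalCoreIndex replaces the old assert-on-out-of-range behavior with clamping: host threads that are not one of the emulated core threads (IDs at or above NUM_CPU_CORES) are treated as running on the last core, so call sites such as EnterSVCProfile/ExitSVCProfile can index per-core arrays without asserting. A standalone sketch of just the clamping logic, assuming four cores as in the real code:

#include <cstddef>
#include <iostream>

constexpr std::size_t NUM_CPU_CORES = 4;

// Mirrors the shape of KernelCore::CurrentPhysicalCoreIndex, with the host
// thread ID passed in explicitly instead of read from thread-local state.
std::size_t CurrentPhysicalCoreIndex(std::size_t host_thread_id) {
    if (host_thread_id >= NUM_CPU_CORES) {
        return NUM_CPU_CORES - 1;
    }
    return host_thread_id;
}

int main() {
    std::cout << CurrentPhysicalCoreIndex(2) << '\n'; // 2: an emulated core thread
    std::cout << CurrentPhysicalCoreIndex(7) << '\n'; // 3: clamped host-only thread
}
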
@@ -146,6 +146,9 @@ public:
     /// Gets the an instance of the respective physical CPU core.
     const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
 
+    /// Gets the current physical core index for the running host thread.
+    std::size_t CurrentPhysicalCoreIndex() const;
+
     /// Gets the sole instance of the Scheduler at the current running core.
     Kernel::KScheduler* CurrentScheduler();
 

@@ -877,7 +877,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
             const u64 thread_ticks = current_thread->GetCpuTime();
 
             out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
-        } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
+        } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
             out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
         }
 

@@ -9,17 +9,20 @@
 #include "core/core.h"
 #include "core/hle/kernel/k_writable_event.h"
 #include "core/hle/kernel/kernel.h"
+#include "core/hle/service/kernel_helpers.h"
 #include "core/hle/service/nvflinger/buffer_queue.h"
 
 namespace Service::NVFlinger {
 
-BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_)
-    : id(id_), layer_id(layer_id_), buffer_wait_event{kernel} {
-    Kernel::KAutoObject::Create(std::addressof(buffer_wait_event));
-    buffer_wait_event.Initialize("BufferQueue:WaitEvent");
+BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_,
+                         KernelHelpers::ServiceContext& service_context_)
+    : id(id_), layer_id(layer_id_), service_context{service_context_} {
+    buffer_wait_event = service_context.CreateEvent("BufferQueue:WaitEvent");
 }
 
-BufferQueue::~BufferQueue() = default;
+BufferQueue::~BufferQueue() {
+    service_context.CloseEvent(buffer_wait_event);
+}
 
 void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) {
     ASSERT(slot < buffer_slots);
@@ -41,7 +44,7 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer)
         .multi_fence = {},
     };
 
-    buffer_wait_event.GetWritableEvent().Signal();
+    buffer_wait_event->GetWritableEvent().Signal();
 }
 
 std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::DequeueBuffer(u32 width,
@@ -123,7 +126,7 @@ void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& mult
     }
     free_buffers_condition.notify_one();
 
-    buffer_wait_event.GetWritableEvent().Signal();
+    buffer_wait_event->GetWritableEvent().Signal();
 }
 
 std::optional<std::reference_wrapper<const BufferQueue::Buffer>> BufferQueue::AcquireBuffer() {
@@ -158,7 +161,7 @@ void BufferQueue::ReleaseBuffer(u32 slot) {
     }
     free_buffers_condition.notify_one();
 
-    buffer_wait_event.GetWritableEvent().Signal();
+    buffer_wait_event->GetWritableEvent().Signal();
 }
 
 void BufferQueue::Connect() {
@@ -173,7 +176,7 @@ void BufferQueue::Disconnect() {
         std::unique_lock lock{queue_sequence_mutex};
         queue_sequence.clear();
     }
-    buffer_wait_event.GetWritableEvent().Signal();
+    buffer_wait_event->GetWritableEvent().Signal();
     is_connect = false;
     free_buffers_condition.notify_one();
 }
@@ -193,11 +196,11 @@ u32 BufferQueue::Query(QueryType type) {
 }
 
 Kernel::KWritableEvent& BufferQueue::GetWritableBufferWaitEvent() {
-    return buffer_wait_event.GetWritableEvent();
+    return buffer_wait_event->GetWritableEvent();
 }
 
 Kernel::KReadableEvent& BufferQueue::GetBufferWaitEvent() {
-    return buffer_wait_event.GetReadableEvent();
+    return buffer_wait_event->GetReadableEvent();
 }
 
 } // namespace Service::NVFlinger

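BufferQueue now holds its wait event as a plain pointer whose lifetime is bracketed by ServiceContext::CreateEvent in the constructor and ServiceContext::CloseEvent in the destructor. A toy model of that create/close pairing (stand-in types; the real ones are Kernel::KEvent and Service::KernelHelpers::ServiceContext):

#include <iostream>
#include <string>

struct Event {
    std::string name;
};

class ServiceContext {
public:
    Event* CreateEvent(std::string name) {
        auto* event = new Event{std::move(name)};
        std::cout << "created " << event->name << '\n';
        return event; // caller must pair this with CloseEvent()
    }

    void CloseEvent(Event* event) {
        std::cout << "closed " << event->name << '\n';
        delete event;
    }
};

// Mirrors the BufferQueue pattern: create in the constructor, close in the
// destructor, and keep only a reference to the owning context.
class BufferQueueLike {
public:
    explicit BufferQueueLike(ServiceContext& service_context_)
        : service_context{service_context_},
          buffer_wait_event{service_context.CreateEvent("BufferQueue:WaitEvent")} {}

    ~BufferQueueLike() {
        service_context.CloseEvent(buffer_wait_event);
    }

private:
    ServiceContext& service_context;
    Event* buffer_wait_event{};
};

int main() {
    ServiceContext ctx;
    BufferQueueLike queue{ctx};
} // prints "created ...", then "closed ..." on scope exit
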
@@ -24,6 +24,10 @@ class KReadableEvent;
 class KWritableEvent;
 } // namespace Kernel
 
+namespace Service::KernelHelpers {
+class ServiceContext;
+} // namespace Service::KernelHelpers
+
 namespace Service::NVFlinger {
 
 constexpr u32 buffer_slots = 0x40;
@@ -54,7 +58,8 @@ public:
         NativeWindowFormat = 2,
     };
 
-    explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_);
+    explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_,
+                         KernelHelpers::ServiceContext& service_context_);
     ~BufferQueue();
 
     enum class BufferTransformFlags : u32 {
@@ -131,12 +136,14 @@ private:
     std::list<u32> free_buffers;
     std::array<Buffer, buffer_slots> buffers;
     std::list<u32> queue_sequence;
-    Kernel::KEvent buffer_wait_event;
+    Kernel::KEvent* buffer_wait_event{};
 
     std::mutex free_buffers_mutex;
     std::condition_variable free_buffers_condition;
 
     std::mutex queue_sequence_mutex;
+
+    KernelHelpers::ServiceContext& service_context;
 };
 
 } // namespace Service::NVFlinger

@@ -61,12 +61,13 @@ void NVFlinger::SplitVSync() {
     }
 }
 
-NVFlinger::NVFlinger(Core::System& system_) : system(system_) {
-    displays.emplace_back(0, "Default", system);
-    displays.emplace_back(1, "External", system);
-    displays.emplace_back(2, "Edid", system);
-    displays.emplace_back(3, "Internal", system);
-    displays.emplace_back(4, "Null", system);
+NVFlinger::NVFlinger(Core::System& system_)
+    : system(system_), service_context(system_, "nvflinger") {
+    displays.emplace_back(0, "Default", service_context, system);
+    displays.emplace_back(1, "External", service_context, system);
+    displays.emplace_back(2, "Edid", service_context, system);
+    displays.emplace_back(3, "Internal", service_context, system);
+    displays.emplace_back(4, "Null", service_context, system);
     guard = std::make_shared<std::mutex>();
 
     // Schedule the screen composition events
@@ -146,7 +147,7 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
 void NVFlinger::CreateLayerAtId(VI::Display& display, u64 layer_id) {
     const u32 buffer_queue_id = next_buffer_queue_id++;
     buffer_queues.emplace_back(
-        std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id));
+        std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id, service_context));
     display.CreateLayer(layer_id, *buffer_queues.back());
 }
 

@@ -15,6 +15,7 @@
 #include <vector>
 
 #include "common/common_types.h"
+#include "core/hle/service/kernel_helpers.h"
 
 namespace Common {
 class Event;
@@ -137,6 +138,8 @@ private:
     std::unique_ptr<std::thread> vsync_thread;
     std::unique_ptr<Common::Event> wait_event;
     std::atomic<bool> is_running{};
+
+    KernelHelpers::ServiceContext service_context;
 };
 
 } // namespace Service::NVFlinger

@@ -12,18 +12,21 @@
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_readable_event.h"
 #include "core/hle/kernel/k_writable_event.h"
+#include "core/hle/service/kernel_helpers.h"
 #include "core/hle/service/vi/display/vi_display.h"
 #include "core/hle/service/vi/layer/vi_layer.h"
 
 namespace Service::VI {
 
-Display::Display(u64 id, std::string name_, Core::System& system)
-    : display_id{id}, name{std::move(name_)}, vsync_event{system.Kernel()} {
-    Kernel::KAutoObject::Create(std::addressof(vsync_event));
-    vsync_event.Initialize(fmt::format("Display VSync Event {}", id));
+Display::Display(u64 id, std::string name_, KernelHelpers::ServiceContext& service_context_,
+                 Core::System& system_)
+    : display_id{id}, name{std::move(name_)}, service_context{service_context_} {
+    vsync_event = service_context.CreateEvent(fmt::format("Display VSync Event {}", id));
 }
 
-Display::~Display() = default;
+Display::~Display() {
+    service_context.CloseEvent(vsync_event);
+}
 
 Layer& Display::GetLayer(std::size_t index) {
     return *layers.at(index);
@@ -34,11 +37,11 @@ const Layer& Display::GetLayer(std::size_t index) const {
 }
 
 Kernel::KReadableEvent& Display::GetVSyncEvent() {
-    return vsync_event.GetReadableEvent();
+    return vsync_event->GetReadableEvent();
 }
 
 void Display::SignalVSyncEvent() {
-    vsync_event.GetWritableEvent().Signal();
+    vsync_event->GetWritableEvent().Signal();
 }
 
 void Display::CreateLayer(u64 layer_id, NVFlinger::BufferQueue& buffer_queue) {

@@ -18,6 +18,9 @@ class KEvent;
 namespace Service::NVFlinger {
 class BufferQueue;
 }
+namespace Service::KernelHelpers {
+class ServiceContext;
+} // namespace Service::KernelHelpers
 
 namespace Service::VI {
 
@@ -31,10 +34,13 @@ class Display {
 public:
     /// Constructs a display with a given unique ID and name.
     ///
-    /// @param id   The unique ID for this display.
+    /// @param id The unique ID for this display.
+    /// @param service_context_ The ServiceContext for the owning service.
     /// @param name_ The name for this display.
     /// @param system_ The global system instance.
     ///
-    Display(u64 id, std::string name_, Core::System& system);
+    Display(u64 id, std::string name_, KernelHelpers::ServiceContext& service_context_,
+            Core::System& system_);
     ~Display();
 
     /// Gets the unique ID assigned to this display.
@@ -98,9 +104,10 @@ public:
 private:
     u64 display_id;
     std::string name;
+    KernelHelpers::ServiceContext& service_context;
 
     std::vector<std::shared_ptr<Layer>> layers;
-    Kernel::KEvent vsync_event;
+    Kernel::KEvent* vsync_event{};
 };
 
 } // namespace Service::VI