early-access version 1978
		| @@ -1,7 +1,7 @@ | ||||
| yuzu emulator early access | ||||
| ============= | ||||
|  | ||||
| This is the source code for early-access 1975. | ||||
| This is the source code for early-access 1978. | ||||
|  | ||||
| ## Legal Notice | ||||
|  | ||||
|   | ||||
| @@ -59,7 +59,6 @@ void LogSettings() { | ||||
|     log_setting("Renderer_UseVsync", values.use_vsync.GetValue()); | ||||
|     log_setting("Renderer_ShaderBackend", values.shader_backend.GetValue()); | ||||
|     log_setting("Renderer_UseAsynchronousShaders", values.use_asynchronous_shaders.GetValue()); | ||||
|     log_setting("Renderer_UseGarbageCollection", values.use_caches_gc.GetValue()); | ||||
|     log_setting("Renderer_AnisotropicFilteringLevel", values.max_anisotropy.GetValue()); | ||||
|     log_setting("Audio_OutputEngine", values.sink_id.GetValue()); | ||||
|     log_setting("Audio_EnableAudioStretching", values.enable_audio_stretching.GetValue()); | ||||
| @@ -143,7 +142,6 @@ void RestoreGlobalState(bool is_powered_on) { | ||||
|     values.shader_backend.SetGlobal(true); | ||||
|     values.use_asynchronous_shaders.SetGlobal(true); | ||||
|     values.use_fast_gpu_time.SetGlobal(true); | ||||
|     values.use_caches_gc.SetGlobal(true); | ||||
|     values.bg_red.SetGlobal(true); | ||||
|     values.bg_green.SetGlobal(true); | ||||
|     values.bg_blue.SetGlobal(true); | ||||
|   | ||||
| @@ -350,7 +350,6 @@ struct Values { | ||||
|     Setting<ShaderBackend> shader_backend{ShaderBackend::GLASM, "shader_backend"}; | ||||
|     Setting<bool> use_asynchronous_shaders{false, "use_asynchronous_shaders"}; | ||||
|     Setting<bool> use_fast_gpu_time{true, "use_fast_gpu_time"}; | ||||
|     Setting<bool> use_caches_gc{false, "use_caches_gc"}; | ||||
|  | ||||
|     Setting<u8> bg_red{0, "bg_red"}; | ||||
|     Setting<u8> bg_green{0, "bg_green"}; | ||||
|   | ||||
| @@ -494,6 +494,12 @@ const ARM_Interface& System::CurrentArmInterface() const { | ||||
|     return impl->kernel.CurrentPhysicalCore().ArmInterface(); | ||||
| } | ||||
|  | ||||
| std::size_t System::CurrentCoreIndex() const { | ||||
|     std::size_t core = impl->kernel.GetCurrentHostThreadID(); | ||||
|     ASSERT(core < Core::Hardware::NUM_CPU_CORES); | ||||
|     return core; | ||||
| } | ||||
|  | ||||
| Kernel::PhysicalCore& System::CurrentPhysicalCore() { | ||||
|     return impl->kernel.CurrentPhysicalCore(); | ||||
| } | ||||
|   | ||||
| @@ -205,6 +205,9 @@ public: | ||||
|     /// Gets an ARM interface to the CPU core that is currently running | ||||
|     [[nodiscard]] const ARM_Interface& CurrentArmInterface() const; | ||||
|  | ||||
|     /// Gets the index of the currently running CPU core | ||||
|     [[nodiscard]] std::size_t CurrentCoreIndex() const; | ||||
|  | ||||
|     /// Gets the physical core for the CPU core that is currently running | ||||
|     [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore(); | ||||
|  | ||||
|   | ||||
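A note on the core-index helpers touched in this commit: a later hunk removes KernelCore::CurrentPhysicalCoreIndex, which clamped out-of-range host thread IDs to the last core, while the new System::CurrentCoreIndex added above asserts that the caller really is one of the emulated core threads. A minimal, self-contained sketch of the two behaviours (NUM_CPU_CORES = 4 is assumed here purely for illustration):

```cpp
#include <cassert>
#include <cstddef>

constexpr std::size_t NUM_CPU_CORES = 4;

// Old behaviour (the removed KernelCore::CurrentPhysicalCoreIndex): clamp foreign
// host threads onto the last core.
std::size_t ClampedCoreIndex(std::size_t host_thread_id) {
    return host_thread_id >= NUM_CPU_CORES ? NUM_CPU_CORES - 1 : host_thread_id;
}

// New behaviour (System::CurrentCoreIndex): only emulated core threads may ask.
std::size_t AssertedCoreIndex(std::size_t host_thread_id) {
    assert(host_thread_id < NUM_CPU_CORES);
    return host_thread_id;
}
```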
| @@ -21,25 +21,34 @@ namespace Core { | ||||
| CpuManager::CpuManager(System& system_) : system{system_} {} | ||||
| CpuManager::~CpuManager() = default; | ||||
|  | ||||
| void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, | ||||
|                              std::size_t core) { | ||||
|     cpu_manager.RunThread(stop_token, core); | ||||
| void CpuManager::ThreadStart(CpuManager& cpu_manager, std::size_t core) { | ||||
|     cpu_manager.RunThread(core); | ||||
| } | ||||
|  | ||||
| void CpuManager::Initialize() { | ||||
|     running_mode = true; | ||||
|     if (is_multicore) { | ||||
|         for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | ||||
|             core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core); | ||||
|             core_data[core].host_thread = | ||||
|                 std::make_unique<std::thread>(ThreadStart, std::ref(*this), core); | ||||
|         } | ||||
|     } else { | ||||
|         core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), -1); | ||||
|         core_data[0].host_thread = std::make_unique<std::thread>(ThreadStart, std::ref(*this), 0); | ||||
|     } | ||||
| } | ||||
|  | ||||
| void CpuManager::Shutdown() { | ||||
|     running_mode = false; | ||||
|     Pause(false); | ||||
|     if (is_multicore) { | ||||
|         for (auto& data : core_data) { | ||||
|             data.host_thread->join(); | ||||
|             data.host_thread.reset(); | ||||
|         } | ||||
|     } else { | ||||
|         core_data[0].host_thread->join(); | ||||
|         core_data[0].host_thread.reset(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() { | ||||
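The Initialize/Shutdown hunk above swaps std::jthread for a std::unique_ptr<std::thread> that has to be joined by hand, which is also why the std::stop_token parameters disappear from ThreadStart and RunThread. A small, self-contained sketch of the lifetime difference (simplified names, not yuzu's real classes):

```cpp
#include <memory>
#include <thread>

// Simplified illustration only. With a plain std::thread the owner is responsible
// for joining before destruction; std::jthread joins automatically in its destructor
// and can hand a std::stop_token to the thread function.
struct Worker {
    std::unique_ptr<std::thread> host_thread;

    void Initialize() {
        host_thread = std::make_unique<std::thread>([] { /* run the core loop */ });
    }

    void Shutdown() {
        host_thread->join();  // must join first, otherwise ~thread() calls std::terminate
        host_thread.reset();
    }
};
```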
| @@ -118,18 +127,17 @@ void CpuManager::MultiCoreRunGuestLoop() { | ||||
|             physical_core = &kernel.CurrentPhysicalCore(); | ||||
|         } | ||||
|         system.ExitDynarmicProfile(); | ||||
|         { | ||||
|             Kernel::KScopedDisableDispatch dd(kernel); | ||||
|             physical_core->ArmInterface().ClearExclusiveState(); | ||||
|         } | ||||
|         physical_core->ArmInterface().ClearExclusiveState(); | ||||
|         kernel.CurrentScheduler()->RescheduleCurrentCore(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| void CpuManager::MultiCoreRunIdleThread() { | ||||
|     auto& kernel = system.Kernel(); | ||||
|     while (true) { | ||||
|         Kernel::KScopedDisableDispatch dd(kernel); | ||||
|         kernel.CurrentPhysicalCore().Idle(); | ||||
|         auto& physical_core = kernel.CurrentPhysicalCore(); | ||||
|         physical_core.Idle(); | ||||
|         kernel.CurrentScheduler()->RescheduleCurrentCore(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| @@ -137,12 +145,12 @@ void CpuManager::MultiCoreRunSuspendThread() { | ||||
|     auto& kernel = system.Kernel(); | ||||
|     kernel.CurrentScheduler()->OnThreadStart(); | ||||
|     while (true) { | ||||
|         auto core = kernel.CurrentPhysicalCoreIndex(); | ||||
|         auto core = kernel.GetCurrentHostThreadID(); | ||||
|         auto& scheduler = *kernel.CurrentScheduler(); | ||||
|         Kernel::KThread* current_thread = scheduler.GetCurrentThread(); | ||||
|         Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context); | ||||
|         ASSERT(scheduler.ContextSwitchPending()); | ||||
|         ASSERT(core == kernel.CurrentPhysicalCoreIndex()); | ||||
|         ASSERT(core == kernel.GetCurrentHostThreadID()); | ||||
|         scheduler.RescheduleCurrentCore(); | ||||
|     } | ||||
| } | ||||
| @@ -309,7 +317,7 @@ void CpuManager::Pause(bool paused) { | ||||
|     } | ||||
| } | ||||
|  | ||||
| void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) { | ||||
| void CpuManager::RunThread(std::size_t core) { | ||||
|     /// Initialization | ||||
|     system.RegisterCoreThread(core); | ||||
|     std::string name; | ||||
| @@ -348,8 +356,8 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) { | ||||
|             sc_sync_first_use = false; | ||||
|         } | ||||
|  | ||||
|         // Emulation was stopped | ||||
|         if (stop_token.stop_requested()) { | ||||
|         // Abort if emulation was killed before the session really starts | ||||
|         if (!system.IsPoweredOn()) { | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|   | ||||
| @@ -78,9 +78,9 @@ private: | ||||
|     void SingleCoreRunSuspendThread(); | ||||
|     void SingleCorePause(bool paused); | ||||
|  | ||||
|     static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core); | ||||
|     static void ThreadStart(CpuManager& cpu_manager, std::size_t core); | ||||
|  | ||||
|     void RunThread(std::stop_token stop_token, std::size_t core); | ||||
|     void RunThread(std::size_t core); | ||||
|  | ||||
|     struct CoreData { | ||||
|         std::shared_ptr<Common::Fiber> host_context; | ||||
| @@ -89,7 +89,7 @@ private: | ||||
|         std::atomic<bool> is_running; | ||||
|         std::atomic<bool> is_paused; | ||||
|         std::atomic<bool> initialized; | ||||
|         std::jthread host_thread; | ||||
|         std::unique_ptr<std::thread> host_thread; | ||||
|     }; | ||||
|  | ||||
|     std::atomic<bool> running_mode{}; | ||||
|   | ||||
| @@ -28,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) { | ||||
|  | ||||
| bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) { | ||||
|     auto& monitor = system.Monitor(); | ||||
|     const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); | ||||
|     const auto current_core = system.CurrentCoreIndex(); | ||||
|  | ||||
|     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. | ||||
|     // TODO(bunnei): We should call CanAccessAtomic(..) here. | ||||
| @@ -58,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu | ||||
|  | ||||
| bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) { | ||||
|     auto& monitor = system.Monitor(); | ||||
|     const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); | ||||
|     const auto current_core = system.CurrentCoreIndex(); | ||||
|  | ||||
|     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. | ||||
|     // TODO(bunnei): We should call CanAccessAtomic(..) here. | ||||
|   | ||||
| @@ -170,10 +170,6 @@ public: | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     const std::string& GetName() const { | ||||
|         return name; | ||||
|     } | ||||
|  | ||||
| private: | ||||
|     void RegisterWithKernel(); | ||||
|     void UnregisterWithKernel(); | ||||
|   | ||||
| @@ -35,7 +35,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) { | ||||
| bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero, | ||||
|                       u32 new_orr_mask) { | ||||
|     auto& monitor = system.Monitor(); | ||||
|     const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); | ||||
|     const auto current_core = system.CurrentCoreIndex(); | ||||
|  | ||||
|     // Load the value from the address. | ||||
|     const auto expected = monitor.ExclusiveRead32(current_core, address); | ||||
|   | ||||
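The arbiter and lock helpers above all wrap the same retry loop around the exclusive monitor: read the word with ExclusiveRead32 on the current core, decide on a new value, and attempt an exclusive store, retrying if exclusivity was lost. A self-contained analogy using std::atomic (this is only an analogy; the real code goes through the exclusive monitor, and only ExclusiveRead32 is visible in this diff):

```cpp
#include <atomic>
#include <cstdint>

// Analogy only: the same compare-and-swap retry shape, expressed with std::atomic.
bool UpdateIfEqual(std::atomic<int32_t>& word, int32_t expected, int32_t new_value) {
    int32_t current = word.load();
    while (current == expected) {
        if (word.compare_exchange_weak(current, new_value)) {
            return true;  // exclusive store succeeded
        }
        // compare_exchange_weak reloaded 'current'; the loop re-checks and retries.
    }
    return false;  // the observed value no longer matches, caller re-evaluates
}
```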
| @@ -13,7 +13,6 @@ ResultCode KHandleTable::Finalize() { | ||||
|     // Get the table and clear our record of it. | ||||
|     u16 saved_table_size = 0; | ||||
|     { | ||||
|         KScopedDisableDispatch dd(kernel); | ||||
|         KScopedSpinLock lk(m_lock); | ||||
|  | ||||
|         std::swap(m_table_size, saved_table_size); | ||||
| @@ -44,7 +43,6 @@ bool KHandleTable::Remove(Handle handle) { | ||||
|     // Find the object and free the entry. | ||||
|     KAutoObject* obj = nullptr; | ||||
|     { | ||||
|         KScopedDisableDispatch dd(kernel); | ||||
|         KScopedSpinLock lk(m_lock); | ||||
|  | ||||
|         if (this->IsValidHandle(handle)) { | ||||
| @@ -63,7 +61,6 @@ bool KHandleTable::Remove(Handle handle) { | ||||
| } | ||||
|  | ||||
| ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) { | ||||
|     KScopedDisableDispatch dd(kernel); | ||||
|     KScopedSpinLock lk(m_lock); | ||||
|  | ||||
|     // Never exceed our capacity. | ||||
| @@ -86,7 +83,6 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) { | ||||
| } | ||||
|  | ||||
| ResultCode KHandleTable::Reserve(Handle* out_handle) { | ||||
|     KScopedDisableDispatch dd(kernel); | ||||
|     KScopedSpinLock lk(m_lock); | ||||
|  | ||||
|     // Never exceed our capacity. | ||||
| @@ -97,7 +93,6 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) { | ||||
| } | ||||
|  | ||||
| void KHandleTable::Unreserve(Handle handle) { | ||||
|     KScopedDisableDispatch dd(kernel); | ||||
|     KScopedSpinLock lk(m_lock); | ||||
|  | ||||
|     // Unpack the handle. | ||||
| @@ -116,7 +111,6 @@ void KHandleTable::Unreserve(Handle handle) { | ||||
| } | ||||
|  | ||||
| void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) { | ||||
|     KScopedDisableDispatch dd(kernel); | ||||
|     KScopedSpinLock lk(m_lock); | ||||
|  | ||||
|     // Unpack the handle. | ||||
|   | ||||
| @@ -69,7 +69,6 @@ public: | ||||
|     template <typename T = KAutoObject> | ||||
|     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const { | ||||
|         // Lock and look up in table. | ||||
|         KScopedDisableDispatch dd(kernel); | ||||
|         KScopedSpinLock lk(m_lock); | ||||
|  | ||||
|         if constexpr (std::is_same_v<T, KAutoObject>) { | ||||
| @@ -124,7 +123,6 @@ public: | ||||
|         size_t num_opened; | ||||
|         { | ||||
|             // Lock the table. | ||||
|             KScopedDisableDispatch dd(kernel); | ||||
|             KScopedSpinLock lk(m_lock); | ||||
|             for (num_opened = 0; num_opened < num_handles; num_opened++) { | ||||
|                 // Get the current handle. | ||||
|   | ||||
| @@ -59,7 +59,6 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority | ||||
|     thread->GetContext64().cpu_registers[0] = 0; | ||||
|     thread->GetContext32().cpu_registers[1] = thread_handle; | ||||
|     thread->GetContext64().cpu_registers[1] = thread_handle; | ||||
|     thread->DisableDispatch(); | ||||
|  | ||||
|     auto& kernel = system.Kernel(); | ||||
|     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires | ||||
|   | ||||
| @@ -376,18 +376,20 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) { | ||||
| } | ||||
|  | ||||
| void KScheduler::DisableScheduling(KernelCore& kernel) { | ||||
|     ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0); | ||||
|     GetCurrentThreadPointer(kernel)->DisableDispatch(); | ||||
|     if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { | ||||
|         ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0); | ||||
|         scheduler->GetCurrentThread()->DisableDispatch(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { | ||||
|     ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1); | ||||
|  | ||||
|     if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) { | ||||
|         GetCurrentThreadPointer(kernel)->EnableDispatch(); | ||||
|     } else { | ||||
|         RescheduleCores(kernel, cores_needing_scheduling); | ||||
|     if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { | ||||
|         ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1); | ||||
|         if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) { | ||||
|             scheduler->GetCurrentThread()->EnableDispatch(); | ||||
|         } | ||||
|     } | ||||
|     RescheduleCores(kernel, cores_needing_scheduling); | ||||
| } | ||||
|  | ||||
| u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { | ||||
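Both the removed and the added versions of DisableScheduling/EnableScheduling treat dispatch-disable as a per-thread nesting counter (the disable_count in StackParameters shown later in this commit); rescheduling only becomes meaningful once the outermost enable is reached. A generic, simplified illustration of that counting scheme (not the kernel's exact behaviour):

```cpp
#include <cassert>

// Generic nested disable/enable counter; analogous to StackParameters::disable_count.
struct DispatchState {
    int disable_count = 0;

    void Disable() { ++disable_count; }

    // Returns true only for the outermost Enable(), i.e. the point at which a
    // pending reschedule could actually take effect.
    bool Enable() {
        assert(disable_count >= 1);
        return --disable_count == 0;
    }
};
```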
| @@ -615,17 +617,13 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c | ||||
|     state.highest_priority_thread = nullptr; | ||||
| } | ||||
|  | ||||
| void KScheduler::Finalize() { | ||||
| KScheduler::~KScheduler() { | ||||
|     if (idle_thread) { | ||||
|         idle_thread->Close(); | ||||
|         idle_thread = nullptr; | ||||
|     } | ||||
| } | ||||
|  | ||||
| KScheduler::~KScheduler() { | ||||
|     ASSERT(!idle_thread); | ||||
| } | ||||
|  | ||||
| KThread* KScheduler::GetCurrentThread() const { | ||||
|     if (auto result = current_thread.load(); result) { | ||||
|         return result; | ||||
| @@ -644,12 +642,10 @@ void KScheduler::RescheduleCurrentCore() { | ||||
|     if (phys_core.IsInterrupted()) { | ||||
|         phys_core.ClearInterrupt(); | ||||
|     } | ||||
|  | ||||
|     guard.Lock(); | ||||
|     if (state.needs_scheduling.load()) { | ||||
|         Schedule(); | ||||
|     } else { | ||||
|         GetCurrentThread()->EnableDispatch(); | ||||
|         guard.Unlock(); | ||||
|     } | ||||
| } | ||||
| @@ -659,33 +655,26 @@ void KScheduler::OnThreadStart() { | ||||
| } | ||||
|  | ||||
| void KScheduler::Unload(KThread* thread) { | ||||
|     ASSERT(thread); | ||||
|  | ||||
|     LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr"); | ||||
|  | ||||
|     if (thread->IsCallingSvc()) { | ||||
|         thread->ClearIsCallingSvc(); | ||||
|     if (thread) { | ||||
|         if (thread->IsCallingSvc()) { | ||||
|             thread->ClearIsCallingSvc(); | ||||
|         } | ||||
|         if (!thread->IsTerminationRequested()) { | ||||
|             prev_thread = thread; | ||||
|  | ||||
|             Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); | ||||
|             cpu_core.SaveContext(thread->GetContext32()); | ||||
|             cpu_core.SaveContext(thread->GetContext64()); | ||||
|             // Save the TPIDR_EL0 system register in case it was modified. | ||||
|             thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); | ||||
|             cpu_core.ClearExclusiveState(); | ||||
|         } else { | ||||
|             prev_thread = nullptr; | ||||
|         } | ||||
|         thread->context_guard.Unlock(); | ||||
|     } | ||||
|  | ||||
|     auto& physical_core = system.Kernel().PhysicalCore(core_id); | ||||
|     if (!physical_core.IsInitialized()) { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     Core::ARM_Interface& cpu_core = physical_core.ArmInterface(); | ||||
|     cpu_core.SaveContext(thread->GetContext32()); | ||||
|     cpu_core.SaveContext(thread->GetContext64()); | ||||
|     // Save the TPIDR_EL0 system register in case it was modified. | ||||
|     thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); | ||||
|     cpu_core.ClearExclusiveState(); | ||||
|  | ||||
|     if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) { | ||||
|         prev_thread = thread; | ||||
|     } else { | ||||
|         prev_thread = nullptr; | ||||
|     } | ||||
|  | ||||
|     thread->context_guard.Unlock(); | ||||
| } | ||||
|  | ||||
| void KScheduler::Reload(KThread* thread) { | ||||
| @@ -694,6 +683,11 @@ void KScheduler::Reload(KThread* thread) { | ||||
|     if (thread) { | ||||
|         ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable."); | ||||
|  | ||||
|         auto* const thread_owner_process = thread->GetOwnerProcess(); | ||||
|         if (thread_owner_process != nullptr) { | ||||
|             system.Kernel().MakeCurrentProcess(thread_owner_process); | ||||
|         } | ||||
|  | ||||
|         Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); | ||||
|         cpu_core.LoadContext(thread->GetContext32()); | ||||
|         cpu_core.LoadContext(thread->GetContext64()); | ||||
| @@ -711,7 +705,7 @@ void KScheduler::SwitchContextStep2() { | ||||
| } | ||||
|  | ||||
| void KScheduler::ScheduleImpl() { | ||||
|     KThread* previous_thread = GetCurrentThread(); | ||||
|     KThread* previous_thread = current_thread.load(); | ||||
|     KThread* next_thread = state.highest_priority_thread; | ||||
|  | ||||
|     state.needs_scheduling = false; | ||||
| @@ -723,15 +717,10 @@ void KScheduler::ScheduleImpl() { | ||||
|  | ||||
|     // If we're not actually switching thread, there's nothing to do. | ||||
|     if (next_thread == current_thread.load()) { | ||||
|         previous_thread->EnableDispatch(); | ||||
|         guard.Unlock(); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     if (next_thread->GetCurrentCore() != core_id) { | ||||
|         next_thread->SetCurrentCore(core_id); | ||||
|     } | ||||
|  | ||||
|     current_thread.store(next_thread); | ||||
|  | ||||
|     KProcess* const previous_process = system.Kernel().CurrentProcess(); | ||||
| @@ -742,7 +731,11 @@ void KScheduler::ScheduleImpl() { | ||||
|     Unload(previous_thread); | ||||
|  | ||||
|     std::shared_ptr<Common::Fiber>* old_context; | ||||
|     old_context = &previous_thread->GetHostContext(); | ||||
|     if (previous_thread != nullptr) { | ||||
|         old_context = &previous_thread->GetHostContext(); | ||||
|     } else { | ||||
|         old_context = &idle_thread->GetHostContext(); | ||||
|     } | ||||
|     guard.Unlock(); | ||||
|  | ||||
|     Common::Fiber::YieldTo(*old_context, *switch_fiber); | ||||
|   | ||||
| @@ -33,8 +33,6 @@ public: | ||||
|     explicit KScheduler(Core::System& system_, s32 core_id_); | ||||
|     ~KScheduler(); | ||||
|  | ||||
|     void Finalize(); | ||||
|  | ||||
|     /// Reschedules to the next available thread (call after current thread is suspended) | ||||
|     void RescheduleCurrentCore(); | ||||
|  | ||||
|   | ||||
| @@ -188,7 +188,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s | ||||
|     // Setup the stack parameters. | ||||
|     StackParameters& sp = GetStackParameters(); | ||||
|     sp.cur_thread = this; | ||||
|     sp.disable_count = 0; | ||||
|     sp.disable_count = 1; | ||||
|     SetInExceptionHandler(); | ||||
|  | ||||
|     // Set thread ID. | ||||
| @@ -970,9 +970,6 @@ ResultCode KThread::Run() { | ||||
|  | ||||
|         // Set our state and finish. | ||||
|         SetState(ThreadState::Runnable); | ||||
|  | ||||
|         DisableDispatch(); | ||||
|  | ||||
|         return ResultSuccess; | ||||
|     } | ||||
| } | ||||
| @@ -1057,16 +1054,4 @@ s32 GetCurrentCoreId(KernelCore& kernel) { | ||||
|     return GetCurrentThread(kernel).GetCurrentCore(); | ||||
| } | ||||
|  | ||||
| KScopedDisableDispatch::~KScopedDisableDispatch() { | ||||
|     if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) { | ||||
|         auto scheduler = kernel.CurrentScheduler(); | ||||
|  | ||||
|         if (scheduler) { | ||||
|             scheduler->RescheduleCurrentCore(); | ||||
|         } | ||||
|     } else { | ||||
|         GetCurrentThread(kernel).EnableDispatch(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| } // namespace Kernel | ||||
|   | ||||
| @@ -450,35 +450,16 @@ public: | ||||
|         sleeping_queue = q; | ||||
|     } | ||||
|  | ||||
|     [[nodiscard]] bool IsKernelThread() const { | ||||
|         return GetActiveCore() == 3; | ||||
|     } | ||||
|  | ||||
|     [[nodiscard]] s32 GetDisableDispatchCount() const { | ||||
|         if (IsKernelThread()) { | ||||
|             // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||||
|             return 1; | ||||
|         } | ||||
|  | ||||
|         return this->GetStackParameters().disable_count; | ||||
|     } | ||||
|  | ||||
|     void DisableDispatch() { | ||||
|         if (IsKernelThread()) { | ||||
|             // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); | ||||
|         this->GetStackParameters().disable_count++; | ||||
|     } | ||||
|  | ||||
|     void EnableDispatch() { | ||||
|         if (IsKernelThread()) { | ||||
|             // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); | ||||
|         this->GetStackParameters().disable_count--; | ||||
|     } | ||||
| @@ -771,16 +752,4 @@ public: | ||||
|     } | ||||
| }; | ||||
|  | ||||
| class KScopedDisableDispatch { | ||||
| public: | ||||
|     [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} { | ||||
|         GetCurrentThread(kernel).DisableDispatch(); | ||||
|     } | ||||
|  | ||||
|     ~KScopedDisableDispatch(); | ||||
|  | ||||
| private: | ||||
|     KernelCore& kernel; | ||||
| }; | ||||
|  | ||||
| } // namespace Kernel | ||||
|   | ||||
| @@ -85,9 +85,8 @@ struct KernelCore::Impl { | ||||
|     } | ||||
|  | ||||
|     void InitializeCores() { | ||||
|         for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | ||||
|             cores[core_id].Initialize(current_process->Is64BitProcess()); | ||||
|             system.Memory().SetCurrentPageTable(*current_process, core_id); | ||||
|         for (auto& core : cores) { | ||||
|             core.Initialize(current_process->Is64BitProcess()); | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -132,6 +131,15 @@ struct KernelCore::Impl { | ||||
|         next_user_process_id = KProcess::ProcessIDMin; | ||||
|         next_thread_id = 1; | ||||
|  | ||||
|         for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | ||||
|             if (suspend_threads[core_id]) { | ||||
|                 suspend_threads[core_id]->Close(); | ||||
|                 suspend_threads[core_id] = nullptr; | ||||
|             } | ||||
|  | ||||
|             schedulers[core_id].reset(); | ||||
|         } | ||||
|  | ||||
|         cores.clear(); | ||||
|  | ||||
|         global_handle_table->Finalize(); | ||||
| @@ -159,16 +167,6 @@ struct KernelCore::Impl { | ||||
|         CleanupObject(time_shared_mem); | ||||
|         CleanupObject(system_resource_limit); | ||||
|  | ||||
|         for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | ||||
|             if (suspend_threads[core_id]) { | ||||
|                 suspend_threads[core_id]->Close(); | ||||
|                 suspend_threads[core_id] = nullptr; | ||||
|             } | ||||
|  | ||||
|             schedulers[core_id]->Finalize(); | ||||
|             schedulers[core_id].reset(); | ||||
|         } | ||||
|  | ||||
|         // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others | ||||
|         next_host_thread_id = Core::Hardware::NUM_CPU_CORES; | ||||
|  | ||||
| @@ -259,6 +257,14 @@ struct KernelCore::Impl { | ||||
|  | ||||
|     void MakeCurrentProcess(KProcess* process) { | ||||
|         current_process = process; | ||||
|         if (process == nullptr) { | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         const u32 core_id = GetCurrentHostThreadID(); | ||||
|         if (core_id < Core::Hardware::NUM_CPU_CORES) { | ||||
|             system.Memory().SetCurrentPageTable(*process, core_id); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     /// Creates a new host thread ID, should only be called by GetHostThreadId | ||||
| @@ -818,20 +824,16 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const { | ||||
|     return impl->cores[id]; | ||||
| } | ||||
|  | ||||
| size_t KernelCore::CurrentPhysicalCoreIndex() const { | ||||
|     const u32 core_id = impl->GetCurrentHostThreadID(); | ||||
|     if (core_id >= Core::Hardware::NUM_CPU_CORES) { | ||||
|         return Core::Hardware::NUM_CPU_CORES - 1; | ||||
|     } | ||||
|     return core_id; | ||||
| } | ||||
|  | ||||
| Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() { | ||||
|     return impl->cores[CurrentPhysicalCoreIndex()]; | ||||
|     u32 core_id = impl->GetCurrentHostThreadID(); | ||||
|     ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); | ||||
|     return impl->cores[core_id]; | ||||
| } | ||||
|  | ||||
| const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const { | ||||
|     return impl->cores[CurrentPhysicalCoreIndex()]; | ||||
|     u32 core_id = impl->GetCurrentHostThreadID(); | ||||
|     ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); | ||||
|     return impl->cores[core_id]; | ||||
| } | ||||
|  | ||||
| Kernel::KScheduler* KernelCore::CurrentScheduler() { | ||||
| @@ -1024,9 +1026,6 @@ void KernelCore::Suspend(bool in_suspention) { | ||||
|             impl->suspend_threads[core_id]->SetState(state); | ||||
|             impl->suspend_threads[core_id]->SetWaitReasonForDebugging( | ||||
|                 ThreadWaitReasonForDebugging::Suspended); | ||||
|             if (!should_suspend) { | ||||
|                 impl->suspend_threads[core_id]->DisableDispatch(); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| } | ||||
| @@ -1041,11 +1040,13 @@ void KernelCore::ExceptionalExit() { | ||||
| } | ||||
|  | ||||
| void KernelCore::EnterSVCProfile() { | ||||
|     impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC)); | ||||
|     std::size_t core = impl->GetCurrentHostThreadID(); | ||||
|     impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC)); | ||||
| } | ||||
|  | ||||
| void KernelCore::ExitSVCProfile() { | ||||
|     MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]); | ||||
|     std::size_t core = impl->GetCurrentHostThreadID(); | ||||
|     MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]); | ||||
| } | ||||
|  | ||||
| std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { | ||||
|   | ||||
| @@ -146,9 +146,6 @@ public: | ||||
|     /// Gets the an instance of the respective physical CPU core. | ||||
|     const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; | ||||
|  | ||||
|     /// Gets the current physical core index for the running host thread. | ||||
|     std::size_t CurrentPhysicalCoreIndex() const; | ||||
|  | ||||
|     /// Gets the sole instance of the Scheduler at the current running core. | ||||
|     Kernel::KScheduler* CurrentScheduler(); | ||||
|  | ||||
|   | ||||
| @@ -877,7 +877,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle | ||||
|             const u64 thread_ticks = current_thread->GetCpuTime(); | ||||
|  | ||||
|             out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks); | ||||
|         } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) { | ||||
|         } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) { | ||||
|             out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks; | ||||
|         } | ||||
|  | ||||
|   | ||||
| @@ -9,20 +9,17 @@ | ||||
| #include "core/core.h" | ||||
| #include "core/hle/kernel/k_writable_event.h" | ||||
| #include "core/hle/kernel/kernel.h" | ||||
| #include "core/hle/service/kernel_helpers.h" | ||||
| #include "core/hle/service/nvflinger/buffer_queue.h" | ||||
|  | ||||
| namespace Service::NVFlinger { | ||||
|  | ||||
| BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_, | ||||
|                          KernelHelpers::ServiceContext& service_context_) | ||||
|     : id(id_), layer_id(layer_id_), service_context{service_context_} { | ||||
|     buffer_wait_event = service_context.CreateEvent("BufferQueue:WaitEvent"); | ||||
| BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_) | ||||
|     : id(id_), layer_id(layer_id_), buffer_wait_event{kernel} { | ||||
|     Kernel::KAutoObject::Create(std::addressof(buffer_wait_event)); | ||||
|     buffer_wait_event.Initialize("BufferQueue:WaitEvent"); | ||||
| } | ||||
|  | ||||
| BufferQueue::~BufferQueue() { | ||||
|     service_context.CloseEvent(buffer_wait_event); | ||||
| } | ||||
| BufferQueue::~BufferQueue() = default; | ||||
|  | ||||
| void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) { | ||||
|     ASSERT(slot < buffer_slots); | ||||
| @@ -44,7 +41,7 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) | ||||
|         .multi_fence = {}, | ||||
|     }; | ||||
|  | ||||
|     buffer_wait_event->GetWritableEvent().Signal(); | ||||
|     buffer_wait_event.GetWritableEvent().Signal(); | ||||
| } | ||||
|  | ||||
| std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::DequeueBuffer(u32 width, | ||||
| @@ -126,7 +123,7 @@ void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& mult | ||||
|     } | ||||
|     free_buffers_condition.notify_one(); | ||||
|  | ||||
|     buffer_wait_event->GetWritableEvent().Signal(); | ||||
|     buffer_wait_event.GetWritableEvent().Signal(); | ||||
| } | ||||
|  | ||||
| std::optional<std::reference_wrapper<const BufferQueue::Buffer>> BufferQueue::AcquireBuffer() { | ||||
| @@ -161,7 +158,7 @@ void BufferQueue::ReleaseBuffer(u32 slot) { | ||||
|     } | ||||
|     free_buffers_condition.notify_one(); | ||||
|  | ||||
|     buffer_wait_event->GetWritableEvent().Signal(); | ||||
|     buffer_wait_event.GetWritableEvent().Signal(); | ||||
| } | ||||
|  | ||||
| void BufferQueue::Connect() { | ||||
| @@ -176,7 +173,7 @@ void BufferQueue::Disconnect() { | ||||
|         std::unique_lock lock{queue_sequence_mutex}; | ||||
|         queue_sequence.clear(); | ||||
|     } | ||||
|     buffer_wait_event->GetWritableEvent().Signal(); | ||||
|     buffer_wait_event.GetWritableEvent().Signal(); | ||||
|     is_connect = false; | ||||
|     free_buffers_condition.notify_one(); | ||||
| } | ||||
| @@ -196,11 +193,11 @@ u32 BufferQueue::Query(QueryType type) { | ||||
| } | ||||
|  | ||||
| Kernel::KWritableEvent& BufferQueue::GetWritableBufferWaitEvent() { | ||||
|     return buffer_wait_event->GetWritableEvent(); | ||||
|     return buffer_wait_event.GetWritableEvent(); | ||||
| } | ||||
|  | ||||
| Kernel::KReadableEvent& BufferQueue::GetBufferWaitEvent() { | ||||
|     return buffer_wait_event->GetReadableEvent(); | ||||
|     return buffer_wait_event.GetReadableEvent(); | ||||
| } | ||||
|  | ||||
| } // namespace Service::NVFlinger | ||||
|   | ||||
| @@ -24,10 +24,6 @@ class KReadableEvent; | ||||
| class KWritableEvent; | ||||
| } // namespace Kernel | ||||
|  | ||||
| namespace Service::KernelHelpers { | ||||
| class ServiceContext; | ||||
| } // namespace Service::KernelHelpers | ||||
|  | ||||
| namespace Service::NVFlinger { | ||||
|  | ||||
| constexpr u32 buffer_slots = 0x40; | ||||
| @@ -58,8 +54,7 @@ public: | ||||
|         NativeWindowFormat = 2, | ||||
|     }; | ||||
|  | ||||
|     explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_, | ||||
|                          KernelHelpers::ServiceContext& service_context_); | ||||
|     explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_); | ||||
|     ~BufferQueue(); | ||||
|  | ||||
|     enum class BufferTransformFlags : u32 { | ||||
| @@ -136,14 +131,12 @@ private: | ||||
|     std::list<u32> free_buffers; | ||||
|     std::array<Buffer, buffer_slots> buffers; | ||||
|     std::list<u32> queue_sequence; | ||||
|     Kernel::KEvent* buffer_wait_event{}; | ||||
|     Kernel::KEvent buffer_wait_event; | ||||
|  | ||||
|     std::mutex free_buffers_mutex; | ||||
|     std::condition_variable free_buffers_condition; | ||||
|  | ||||
|     std::mutex queue_sequence_mutex; | ||||
|  | ||||
|     KernelHelpers::ServiceContext& service_context; | ||||
| }; | ||||
|  | ||||
| } // namespace Service::NVFlinger | ||||
|   | ||||
| @@ -61,13 +61,12 @@ void NVFlinger::SplitVSync() { | ||||
|     } | ||||
| } | ||||
|  | ||||
| NVFlinger::NVFlinger(Core::System& system_) | ||||
|     : system(system_), service_context(system_, "nvflinger") { | ||||
|     displays.emplace_back(0, "Default", service_context, system); | ||||
|     displays.emplace_back(1, "External", service_context, system); | ||||
|     displays.emplace_back(2, "Edid", service_context, system); | ||||
|     displays.emplace_back(3, "Internal", service_context, system); | ||||
|     displays.emplace_back(4, "Null", service_context, system); | ||||
| NVFlinger::NVFlinger(Core::System& system_) : system(system_) { | ||||
|     displays.emplace_back(0, "Default", system); | ||||
|     displays.emplace_back(1, "External", system); | ||||
|     displays.emplace_back(2, "Edid", system); | ||||
|     displays.emplace_back(3, "Internal", system); | ||||
|     displays.emplace_back(4, "Null", system); | ||||
|     guard = std::make_shared<std::mutex>(); | ||||
|  | ||||
|     // Schedule the screen composition events | ||||
| @@ -147,7 +146,7 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) { | ||||
| void NVFlinger::CreateLayerAtId(VI::Display& display, u64 layer_id) { | ||||
|     const u32 buffer_queue_id = next_buffer_queue_id++; | ||||
|     buffer_queues.emplace_back( | ||||
|         std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id, service_context)); | ||||
|         std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id)); | ||||
|     display.CreateLayer(layer_id, *buffer_queues.back()); | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -15,7 +15,6 @@ | ||||
| #include <vector> | ||||
|  | ||||
| #include "common/common_types.h" | ||||
| #include "core/hle/service/kernel_helpers.h" | ||||
|  | ||||
| namespace Common { | ||||
| class Event; | ||||
| @@ -138,8 +137,6 @@ private: | ||||
|     std::unique_ptr<std::thread> vsync_thread; | ||||
|     std::unique_ptr<Common::Event> wait_event; | ||||
|     std::atomic<bool> is_running{}; | ||||
|  | ||||
|     KernelHelpers::ServiceContext service_context; | ||||
| }; | ||||
|  | ||||
| } // namespace Service::NVFlinger | ||||
|   | ||||
| @@ -12,21 +12,18 @@ | ||||
| #include "core/hle/kernel/k_event.h" | ||||
| #include "core/hle/kernel/k_readable_event.h" | ||||
| #include "core/hle/kernel/k_writable_event.h" | ||||
| #include "core/hle/service/kernel_helpers.h" | ||||
| #include "core/hle/service/vi/display/vi_display.h" | ||||
| #include "core/hle/service/vi/layer/vi_layer.h" | ||||
|  | ||||
| namespace Service::VI { | ||||
|  | ||||
| Display::Display(u64 id, std::string name_, KernelHelpers::ServiceContext& service_context_, | ||||
|                  Core::System& system_) | ||||
|     : display_id{id}, name{std::move(name_)}, service_context{service_context_} { | ||||
|     vsync_event = service_context.CreateEvent(fmt::format("Display VSync Event {}", id)); | ||||
| Display::Display(u64 id, std::string name_, Core::System& system) | ||||
|     : display_id{id}, name{std::move(name_)}, vsync_event{system.Kernel()} { | ||||
|     Kernel::KAutoObject::Create(std::addressof(vsync_event)); | ||||
|     vsync_event.Initialize(fmt::format("Display VSync Event {}", id)); | ||||
| } | ||||
|  | ||||
| Display::~Display() { | ||||
|     service_context.CloseEvent(vsync_event); | ||||
| } | ||||
| Display::~Display() = default; | ||||
|  | ||||
| Layer& Display::GetLayer(std::size_t index) { | ||||
|     return *layers.at(index); | ||||
| @@ -37,11 +34,11 @@ const Layer& Display::GetLayer(std::size_t index) const { | ||||
| } | ||||
|  | ||||
| Kernel::KReadableEvent& Display::GetVSyncEvent() { | ||||
|     return vsync_event->GetReadableEvent(); | ||||
|     return vsync_event.GetReadableEvent(); | ||||
| } | ||||
|  | ||||
| void Display::SignalVSyncEvent() { | ||||
|     vsync_event->GetWritableEvent().Signal(); | ||||
|     vsync_event.GetWritableEvent().Signal(); | ||||
| } | ||||
|  | ||||
| void Display::CreateLayer(u64 layer_id, NVFlinger::BufferQueue& buffer_queue) { | ||||
|   | ||||
| @@ -18,9 +18,6 @@ class KEvent; | ||||
| namespace Service::NVFlinger { | ||||
| class BufferQueue; | ||||
| } | ||||
| namespace Service::KernelHelpers { | ||||
| class ServiceContext; | ||||
| } // namespace Service::KernelHelpers | ||||
|  | ||||
| namespace Service::VI { | ||||
|  | ||||
| @@ -34,13 +31,10 @@ class Display { | ||||
| public: | ||||
|     /// Constructs a display with a given unique ID and name. | ||||
|     /// | ||||
|     /// @param id The unique ID for this display. | ||||
|     /// @param service_context_ The ServiceContext for the owning service. | ||||
|     /// @param id   The unique ID for this display. | ||||
|     /// @param name_ The name for this display. | ||||
|     /// @param system_ The global system instance. | ||||
|     /// | ||||
|     Display(u64 id, std::string name_, KernelHelpers::ServiceContext& service_context_, | ||||
|             Core::System& system_); | ||||
|     Display(u64 id, std::string name_, Core::System& system); | ||||
|     ~Display(); | ||||
|  | ||||
|     /// Gets the unique ID assigned to this display. | ||||
| @@ -104,10 +98,9 @@ public: | ||||
| private: | ||||
|     u64 display_id; | ||||
|     std::string name; | ||||
|     KernelHelpers::ServiceContext& service_context; | ||||
|  | ||||
|     std::vector<std::shared_ptr<Layer>> layers; | ||||
|     Kernel::KEvent* vsync_event{}; | ||||
|     Kernel::KEvent vsync_event; | ||||
| }; | ||||
|  | ||||
| } // namespace Service::VI | ||||
|   | ||||
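The buffer queue, NVFlinger and display hunks above all make the same ownership change: instead of asking a KernelHelpers::ServiceContext to create and later close a Kernel::KEvent*, each object now owns a KEvent by value and initializes it in place with KAutoObject::Create plus Initialize. A generic analogy of that shift (illustrative stand-in types, not yuzu's API):

```cpp
#include <string>
#include <utility>

// Illustrative stand-in type only.
struct Event {
    std::string name;
    void Initialize(std::string n) { name = std::move(n); }
};

struct Display {
    Event vsync_event;  // owned by value: constructed, initialized and destroyed with Display

    explicit Display(int id) {
        vsync_event.Initialize("Display VSync Event " + std::to_string(id));
    }
    // No explicit CloseEvent() call is needed in the destructor, unlike the
    // factory-pointer version that this commit replaces.
};
```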
| @@ -170,7 +170,7 @@ public: | ||||
|     float GetAxis(int axis, float range, float offset) const { | ||||
|         std::lock_guard lock{mutex}; | ||||
|         const float value = static_cast<float>(state.axes.at(axis)) / 32767.0f; | ||||
|         return (value + offset) * range; | ||||
|         return (value + offset) / range; | ||||
|     } | ||||
|  | ||||
|     bool RumblePlay(u16 amp_low, u16 amp_high) { | ||||
| @@ -538,8 +538,8 @@ public: | ||||
|     } | ||||
|  | ||||
|     std::tuple<float, float> GetRawStatus() const override { | ||||
|         const float x = joystick->GetAxis(axis_x, 1.0f, offset_x); | ||||
|         const float y = joystick->GetAxis(axis_y, 1.0f, offset_y); | ||||
|         const float x = joystick->GetAxis(axis_x, range, offset_x); | ||||
|         const float y = joystick->GetAxis(axis_y, range, offset_y); | ||||
|         return {x, -y}; | ||||
|     } | ||||
|  | ||||
|   | ||||
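The GetAxis change above replaces a multiplication by the configured range with a division, and GetRawStatus now passes the real range instead of 1.0f. A small worked example of what that does to the reported deflection (raw axis and range values chosen only for illustration):

```cpp
#include <cstdint>

// Illustration of the new normalization: a signed 16-bit SDL axis is first mapped
// to roughly [-1, 1], then shifted by the calibration offset and divided by range.
float NormalizeAxis(int16_t raw, float offset, float range) {
    const float value = static_cast<float>(raw) / 32767.0f;
    return (value + offset) / range;
}
// Example: raw = 16384 (about half deflection), offset = 0, range = 0.95
//   new formula: 0.5 / 0.95 ≈ 0.526  (dividing by a range below 1 widens the output)
//   old formula: 0.5 * 0.95 ≈ 0.475
```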
| @@ -485,7 +485,7 @@ void BufferCache<P>::TickFrame() { | ||||
|     const bool skip_preferred = hits * 256 < shots * 251; | ||||
|     uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0; | ||||
|  | ||||
|     if (Settings::values.use_caches_gc.GetValue() && total_used_memory >= EXPECTED_MEMORY) { | ||||
|     if (total_used_memory >= EXPECTED_MEMORY) { | ||||
|         RunGarbageCollector(); | ||||
|     } | ||||
|     ++frame_tick; | ||||
|   | ||||
| @@ -23,7 +23,6 @@ | ||||
| #include "common/common_types.h" | ||||
| #include "common/literals.h" | ||||
| #include "common/logging/log.h" | ||||
| #include "common/settings.h" | ||||
| #include "video_core/compatible_formats.h" | ||||
| #include "video_core/delayed_destruction_ring.h" | ||||
| #include "video_core/dirty_flags.h" | ||||
| @@ -510,7 +509,7 @@ void TextureCache<P>::RunGarbageCollector() { | ||||
|  | ||||
| template <class P> | ||||
| void TextureCache<P>::TickFrame() { | ||||
|     if (Settings::values.use_caches_gc.GetValue() && total_used_memory > minimum_memory) { | ||||
|     if (total_used_memory > minimum_memory) { | ||||
|         RunGarbageCollector(); | ||||
|     } | ||||
|     sentenced_images.Tick(); | ||||
|   | ||||
| @@ -832,7 +832,6 @@ void Config::ReadRendererValues() { | ||||
|     ReadGlobalSetting(Settings::values.shader_backend); | ||||
|     ReadGlobalSetting(Settings::values.use_asynchronous_shaders); | ||||
|     ReadGlobalSetting(Settings::values.use_fast_gpu_time); | ||||
|     ReadGlobalSetting(Settings::values.use_caches_gc); | ||||
|     ReadGlobalSetting(Settings::values.bg_red); | ||||
|     ReadGlobalSetting(Settings::values.bg_green); | ||||
|     ReadGlobalSetting(Settings::values.bg_blue); | ||||
| @@ -1381,7 +1380,6 @@ void Config::SaveRendererValues() { | ||||
|                  Settings::values.shader_backend.UsingGlobal()); | ||||
|     WriteGlobalSetting(Settings::values.use_asynchronous_shaders); | ||||
|     WriteGlobalSetting(Settings::values.use_fast_gpu_time); | ||||
|     WriteGlobalSetting(Settings::values.use_caches_gc); | ||||
|     WriteGlobalSetting(Settings::values.bg_red); | ||||
|     WriteGlobalSetting(Settings::values.bg_green); | ||||
|     WriteGlobalSetting(Settings::values.bg_blue); | ||||
|   | ||||
| @@ -28,7 +28,6 @@ void ConfigureGraphicsAdvanced::SetConfiguration() { | ||||
|  | ||||
|     ui->use_vsync->setChecked(Settings::values.use_vsync.GetValue()); | ||||
|     ui->use_asynchronous_shaders->setChecked(Settings::values.use_asynchronous_shaders.GetValue()); | ||||
|     ui->use_caches_gc->setChecked(Settings::values.use_caches_gc.GetValue()); | ||||
|     ui->use_fast_gpu_time->setChecked(Settings::values.use_fast_gpu_time.GetValue()); | ||||
|  | ||||
|     if (Settings::IsConfiguringGlobal()) { | ||||
| @@ -55,8 +54,6 @@ void ConfigureGraphicsAdvanced::ApplyConfiguration() { | ||||
|     ConfigurationShared::ApplyPerGameSetting(&Settings::values.use_asynchronous_shaders, | ||||
|                                              ui->use_asynchronous_shaders, | ||||
|                                              use_asynchronous_shaders); | ||||
|     ConfigurationShared::ApplyPerGameSetting(&Settings::values.use_caches_gc, ui->use_caches_gc, | ||||
|                                              use_caches_gc); | ||||
|     ConfigurationShared::ApplyPerGameSetting(&Settings::values.use_fast_gpu_time, | ||||
|                                              ui->use_fast_gpu_time, use_fast_gpu_time); | ||||
| } | ||||
| @@ -81,7 +78,6 @@ void ConfigureGraphicsAdvanced::SetupPerGameUI() { | ||||
|         ui->use_asynchronous_shaders->setEnabled( | ||||
|             Settings::values.use_asynchronous_shaders.UsingGlobal()); | ||||
|         ui->use_fast_gpu_time->setEnabled(Settings::values.use_fast_gpu_time.UsingGlobal()); | ||||
|         ui->use_caches_gc->setEnabled(Settings::values.use_caches_gc.UsingGlobal()); | ||||
|         ui->anisotropic_filtering_combobox->setEnabled( | ||||
|             Settings::values.max_anisotropy.UsingGlobal()); | ||||
|  | ||||
| @@ -94,8 +90,6 @@ void ConfigureGraphicsAdvanced::SetupPerGameUI() { | ||||
|                                             use_asynchronous_shaders); | ||||
|     ConfigurationShared::SetColoredTristate(ui->use_fast_gpu_time, | ||||
|                                             Settings::values.use_fast_gpu_time, use_fast_gpu_time); | ||||
|     ConfigurationShared::SetColoredTristate(ui->use_caches_gc, Settings::values.use_caches_gc, | ||||
|                                             use_caches_gc); | ||||
|     ConfigurationShared::SetColoredComboBox( | ||||
|         ui->gpu_accuracy, ui->label_gpu_accuracy, | ||||
|         static_cast<int>(Settings::values.gpu_accuracy.GetValue(true))); | ||||
|   | ||||
| @@ -37,5 +37,4 @@ private: | ||||
|     ConfigurationShared::CheckState use_vsync; | ||||
|     ConfigurationShared::CheckState use_asynchronous_shaders; | ||||
|     ConfigurationShared::CheckState use_fast_gpu_time; | ||||
|     ConfigurationShared::CheckState use_caches_gc; | ||||
| }; | ||||
|   | ||||
| @@ -82,24 +82,17 @@ | ||||
|            <string>Enables asynchronous shader compilation, which may reduce shader stutter. This feature is experimental.</string> | ||||
|           </property> | ||||
|           <property name="text"> | ||||
|            <string>Use asynchronous shader building</string> | ||||
|            <string>Use asynchronous shader building (hack)</string> | ||||
|           </property> | ||||
|          </widget> | ||||
|         </item> | ||||
|         <item> | ||||
|          <widget class="QCheckBox" name="use_fast_gpu_time"> | ||||
|           <property name="text"> | ||||
|            <string>Use Fast GPU Time</string> | ||||
|           </property> | ||||
|          </widget> | ||||
|         </item> | ||||
|         <item> | ||||
|          <widget class="QCheckBox" name="use_caches_gc"> | ||||
|           <property name="toolTip"> | ||||
|            <string>Enables garbage collection for the GPU caches, this will try to keep VRAM within 3-4 GB by flushing the least used textures/buffers. May cause issues in a few games.</string> | ||||
|             <string>Enables Fast GPU Time. This option will force most games to run at their highest native resolution.</string> | ||||
|           </property> | ||||
|           <property name="text"> | ||||
|            <string>Enable GPU cache garbage collection (experimental)</string> | ||||
|            <string>Use Fast GPU Time (hack)</string> | ||||
|           </property> | ||||
|          </widget> | ||||
|         </item> | ||||
|   | ||||
| @@ -468,7 +468,6 @@ void Config::ReadValues() { | ||||
|     ReadSetting("Renderer", Settings::values.use_nvdec_emulation); | ||||
|     ReadSetting("Renderer", Settings::values.accelerate_astc); | ||||
|     ReadSetting("Renderer", Settings::values.use_fast_gpu_time); | ||||
|     ReadSetting("Renderer", Settings::values.use_caches_gc); | ||||
|  | ||||
|     ReadSetting("Renderer", Settings::values.bg_red); | ||||
|     ReadSetting("Renderer", Settings::values.bg_green); | ||||
|   | ||||