early-access version 1260
		| @@ -1,7 +1,7 @@ | ||||
| yuzu emulator early access | ||||
| ============= | ||||
|  | ||||
| This is the source code for early-access 1259. | ||||
| This is the source code for early-access 1260. | ||||
|  | ||||
| ## Legal Notice | ||||
|  | ||||
|   | ||||
| @@ -11,6 +11,7 @@ | ||||
| #include "audio_core/info_updater.h" | ||||
| #include "audio_core/voice_context.h" | ||||
| #include "common/logging/log.h" | ||||
| #include "core/hle/kernel/writable_event.h" | ||||
| #include "core/memory.h" | ||||
| #include "core/settings.h" | ||||
|  | ||||
| @@ -70,9 +71,10 @@ namespace { | ||||
| namespace AudioCore { | ||||
| AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_, | ||||
|                              AudioCommon::AudioRendererParameter params, | ||||
|                              Stream::ReleaseCallback&& release_callback, | ||||
|                              std::shared_ptr<Kernel::WritableEvent> buffer_event_, | ||||
|                              std::size_t instance_number) | ||||
|     : worker_params{params}, memory_pool_info(params.effect_count + params.voice_count * 4), | ||||
|     : worker_params{params}, buffer_event{buffer_event_}, | ||||
|       memory_pool_info(params.effect_count + params.voice_count * 4), | ||||
|       voice_context(params.voice_count), effect_context(params.effect_count), mix_context(), | ||||
|       sink_context(params.sink_count), splitter_context(), | ||||
|       voices(params.voice_count), memory{memory_}, | ||||
| @@ -83,9 +85,10 @@ AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory | ||||
|                                 params.num_splitter_send_channels); | ||||
|     mix_context.Initialize(behavior_info, params.submix_count + 1, params.effect_count); | ||||
|     audio_out = std::make_unique<AudioCore::AudioOut>(); | ||||
|     stream = audio_out->OpenStream( | ||||
|         core_timing, params.sample_rate, AudioCommon::STREAM_NUM_CHANNELS, | ||||
|         fmt::format("AudioRenderer-Instance{}", instance_number), std::move(release_callback)); | ||||
|     stream = | ||||
|         audio_out->OpenStream(core_timing, params.sample_rate, AudioCommon::STREAM_NUM_CHANNELS, | ||||
|                               fmt::format("AudioRenderer-Instance{}", instance_number), | ||||
|                               [=]() { buffer_event_->Signal(); }); | ||||
|     audio_out->StartStream(stream); | ||||
|  | ||||
|     QueueMixedBuffer(0); | ||||
|   | ||||
| @@ -27,6 +27,10 @@ namespace Core::Timing { | ||||
| class CoreTiming; | ||||
| } | ||||
|  | ||||
| namespace Kernel { | ||||
| class WritableEvent; | ||||
| } | ||||
|  | ||||
| namespace Core::Memory { | ||||
| class Memory; | ||||
| } | ||||
| @@ -40,7 +44,8 @@ class AudioRenderer { | ||||
| public: | ||||
|     AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_, | ||||
|                   AudioCommon::AudioRendererParameter params, | ||||
|                   Stream::ReleaseCallback&& release_callback, std::size_t instance_number); | ||||
|                   std::shared_ptr<Kernel::WritableEvent> buffer_event_, | ||||
|                   std::size_t instance_number); | ||||
|     ~AudioRenderer(); | ||||
|  | ||||
|     [[nodiscard]] ResultCode UpdateAudioRenderer(const std::vector<u8>& input_params, | ||||
| @@ -56,6 +61,7 @@ private: | ||||
|     BehaviorInfo behavior_info{}; | ||||
|  | ||||
|     AudioCommon::AudioRendererParameter worker_params; | ||||
|     std::shared_ptr<Kernel::WritableEvent> buffer_event; | ||||
|     std::vector<ServerMemoryPoolInfo> memory_pool_info; | ||||
|     VoiceContext voice_context; | ||||
|     EffectContext effect_context; | ||||
|   | ||||
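Editorial note: the AudioRenderer constructor now receives the buffer event directly instead of a release callback. A minimal sketch of the expected wiring, mirroring the audren:u IAudioRenderer change later in this commit (`audren_params` and `instance_number` come from that diff; the surrounding context is illustrative only):

```cpp
// Sketch only: assumes the constructor shape introduced above.
auto system_event =
    Kernel::WritableEvent::CreateEventPair(system.Kernel(), "IAudioRenderer:SystemEvent");

// The renderer signals system_event.writable from its stream release callback,
// so the service no longer supplies (or locks around) its own lambda.
renderer = std::make_unique<AudioCore::AudioRenderer>(
    system.CoreTiming(), system.Memory(), audren_params,
    system_event.writable, instance_number);
```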
| @@ -130,11 +130,7 @@ bool Stream::ContainsBuffer([[maybe_unused]] Buffer::Tag tag) const { | ||||
| std::vector<Buffer::Tag> Stream::GetTagsAndReleaseBuffers(std::size_t max_count) { | ||||
|     std::vector<Buffer::Tag> tags; | ||||
|     for (std::size_t count = 0; count < max_count && !released_buffers.empty(); ++count) { | ||||
|         if (released_buffers.front()) { | ||||
|             tags.push_back(released_buffers.front()->GetTag()); | ||||
|         } else { | ||||
|             ASSERT_MSG(false, "Invalid tag in released_buffers!"); | ||||
|         } | ||||
|         tags.push_back(released_buffers.front()->GetTag()); | ||||
|         released_buffers.pop(); | ||||
|     } | ||||
|     return tags; | ||||
| @@ -144,11 +140,7 @@ std::vector<Buffer::Tag> Stream::GetTagsAndReleaseBuffers() { | ||||
|     std::vector<Buffer::Tag> tags; | ||||
|     tags.reserve(released_buffers.size()); | ||||
|     while (!released_buffers.empty()) { | ||||
|         if (released_buffers.front()) { | ||||
|             tags.push_back(released_buffers.front()->GetTag()); | ||||
|         } else { | ||||
|             ASSERT_MSG(false, "Invalid tag in released_buffers!"); | ||||
|         } | ||||
|         tags.push_back(released_buffers.front()->GetTag()); | ||||
|         released_buffers.pop(); | ||||
|     } | ||||
|     return tags; | ||||
|   | ||||
| @@ -201,8 +201,6 @@ add_library(core STATIC | ||||
|     hle/kernel/server_port.h | ||||
|     hle/kernel/server_session.cpp | ||||
|     hle/kernel/server_session.h | ||||
|     hle/kernel/service_thread.cpp | ||||
|     hle/kernel/service_thread.h | ||||
|     hle/kernel/session.cpp | ||||
|     hle/kernel/session.h | ||||
|     hle/kernel/shared_memory.cpp | ||||
| @@ -501,6 +499,7 @@ add_library(core STATIC | ||||
|     hle/service/sm/controller.h | ||||
|     hle/service/sm/sm.cpp | ||||
|     hle/service/sm/sm.h | ||||
|     hle/service/sockets/blocking_worker.h | ||||
|     hle/service/sockets/bsd.cpp | ||||
|     hle/service/sockets/bsd.h | ||||
|     hle/service/sockets/ethc.cpp | ||||
|   | ||||
| @@ -159,7 +159,7 @@ struct System::Impl { | ||||
|         device_memory = std::make_unique<Core::DeviceMemory>(); | ||||
|  | ||||
|         is_multicore = Settings::values.use_multi_core.GetValue(); | ||||
|         is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue(); | ||||
|         is_async_gpu = is_multicore || Settings::values.use_asynchronous_gpu_emulation.GetValue(); | ||||
|  | ||||
|         kernel.SetMulticore(is_multicore); | ||||
|         cpu_manager.SetMulticore(is_multicore); | ||||
| @@ -307,6 +307,7 @@ struct System::Impl { | ||||
|         service_manager.reset(); | ||||
|         cheat_engine.reset(); | ||||
|         telemetry_session.reset(); | ||||
|         device_memory.reset(); | ||||
|  | ||||
|         // Close all CPU/threading state | ||||
|         cpu_manager.Shutdown(); | ||||
|   | ||||
| @@ -46,6 +46,43 @@ void SessionRequestHandler::ClientDisconnected( | ||||
|     boost::range::remove_erase(connected_sessions, server_session); | ||||
| } | ||||
|  | ||||
| std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread( | ||||
|     const std::string& reason, u64 timeout, WakeupCallback&& callback, | ||||
|     std::shared_ptr<WritableEvent> writable_event) { | ||||
|     // Put the client thread to sleep until the wait event is signaled or the timeout expires. | ||||
|  | ||||
|     if (!writable_event) { | ||||
|         // Create event if not provided | ||||
|         const auto pair = WritableEvent::CreateEventPair(kernel, "HLE Pause Event: " + reason); | ||||
|         writable_event = pair.writable; | ||||
|     } | ||||
|  | ||||
|     Handle event_handle = InvalidHandle; | ||||
|     { | ||||
|         KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout); | ||||
|         thread->SetHLECallback( | ||||
|             [context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool { | ||||
|                 ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT | ||||
|                                                 ? ThreadWakeupReason::Timeout | ||||
|                                                 : ThreadWakeupReason::Signal; | ||||
|                 callback(thread, context, reason); | ||||
|                 context.WriteToOutgoingCommandBuffer(*thread); | ||||
|                 return true; | ||||
|             }); | ||||
|         const auto readable_event{writable_event->GetReadableEvent()}; | ||||
|         writable_event->Clear(); | ||||
|         thread->SetHLESyncObject(readable_event.get()); | ||||
|         thread->SetStatus(ThreadStatus::WaitHLEEvent); | ||||
|         thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT); | ||||
|         readable_event->AddWaitingThread(thread); | ||||
|     } | ||||
|     thread->SetHLETimeEvent(event_handle); | ||||
|  | ||||
|     is_thread_waiting = true; | ||||
|  | ||||
|     return writable_event; | ||||
| } | ||||
|  | ||||
| HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory, | ||||
|                                      std::shared_ptr<ServerSession> server_session, | ||||
|                                      std::shared_ptr<Thread> thread) | ||||
|   | ||||
| @@ -129,6 +129,23 @@ public: | ||||
|     using WakeupCallback = std::function<void( | ||||
|         std::shared_ptr<Thread> thread, HLERequestContext& context, ThreadWakeupReason reason)>; | ||||
|  | ||||
|     /** | ||||
|      * Puts the specified guest thread to sleep until the returned event is signaled or until the | ||||
|      * specified timeout expires. | ||||
|      * @param reason Reason for pausing the thread, to be used for debugging purposes. | ||||
|      * @param timeout Timeout in nanoseconds after which the thread will be awoken and the callback | ||||
|      * invoked with a Timeout reason. | ||||
|      * @param callback Callback to be invoked when the thread is resumed. This callback must write | ||||
|      * the entire command response once again, regardless of the state of it before this function | ||||
|      * was called. | ||||
|      * @param writable_event Event to use to wake up the thread. If unspecified, an event will be | ||||
|      * created. | ||||
|      * @returns Event that when signaled will resume the thread and call the callback function. | ||||
|      */ | ||||
|     std::shared_ptr<WritableEvent> SleepClientThread( | ||||
|         const std::string& reason, u64 timeout, WakeupCallback&& callback, | ||||
|         std::shared_ptr<WritableEvent> writable_event = nullptr); | ||||
|  | ||||
|     /// Populates this context with data from the requesting process/thread. | ||||
|     ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table, | ||||
|                                                  u32_le* src_cmdbuf); | ||||
|   | ||||
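Editorial note: a hedged sketch of how a service handler might call SleepClientThread as documented above. The service name, request, and response layout are hypothetical; only the SleepClientThread signature is taken from this commit:

```cpp
// Hypothetical handler that defers its IPC reply until an event fires or a
// 20 ms timeout elapses. Sketch only; not part of this commit.
void SomeService::WaitForCompletion(Kernel::HLERequestContext& ctx) {
    ctx.SleepClientThread(
        "SomeService::WaitForCompletion", 20'000'000, // timeout in nanoseconds
        [](std::shared_ptr<Kernel::Thread>, Kernel::HLERequestContext& ctx_,
           Kernel::ThreadWakeupReason reason) {
            // The callback must write the whole response again, for both the
            // Signal and the Timeout wakeup reasons.
            IPC::ResponseBuilder rb{ctx_, 2};
            rb.Push(reason == Kernel::ThreadWakeupReason::Timeout ? RESULT_TIMEOUT
                                                                  : RESULT_SUCCESS);
        });
    // No event was passed, so SleepClientThread creates one and returns it;
    // a caller that keeps the returned WritableEvent can Signal() it later.
}
```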
| @@ -8,7 +8,7 @@ | ||||
| #include <functional> | ||||
| #include <memory> | ||||
| #include <thread> | ||||
| #include <unordered_set> | ||||
| #include <unordered_map> | ||||
| #include <utility> | ||||
|  | ||||
| #include "common/assert.h" | ||||
| @@ -35,7 +35,6 @@ | ||||
| #include "core/hle/kernel/physical_core.h" | ||||
| #include "core/hle/kernel/process.h" | ||||
| #include "core/hle/kernel/resource_limit.h" | ||||
| #include "core/hle/kernel/service_thread.h" | ||||
| #include "core/hle/kernel/shared_memory.h" | ||||
| #include "core/hle/kernel/synchronization.h" | ||||
| #include "core/hle/kernel/thread.h" | ||||
| @@ -58,8 +57,6 @@ struct KernelCore::Impl { | ||||
|     } | ||||
|  | ||||
|     void Initialize(KernelCore& kernel) { | ||||
|         process_list.clear(); | ||||
|  | ||||
|         RegisterHostThread(); | ||||
|  | ||||
|         global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel); | ||||
| @@ -92,6 +89,8 @@ struct KernelCore::Impl { | ||||
|  | ||||
|         cores.clear(); | ||||
|  | ||||
|         process_list.clear(); | ||||
|  | ||||
|         current_process = nullptr; | ||||
|  | ||||
|         system_resource_limit = nullptr; | ||||
| @@ -108,9 +107,6 @@ struct KernelCore::Impl { | ||||
|         std::fill(register_host_thread_keys.begin(), register_host_thread_keys.end(), | ||||
|                   std::thread::id{}); | ||||
|         std::fill(register_host_thread_values.begin(), register_host_thread_values.end(), 0); | ||||
|  | ||||
|         // Ensures all service threads gracefully shutdown | ||||
|         service_threads.clear(); | ||||
|     } | ||||
|  | ||||
|     void InitializePhysicalCores() { | ||||
| @@ -333,7 +329,7 @@ struct KernelCore::Impl { | ||||
|     std::atomic<u32> registered_thread_ids{Core::Hardware::NUM_CPU_CORES}; | ||||
|  | ||||
|     // Number of host threads is a relatively high number to avoid overflowing | ||||
|     static constexpr size_t NUM_REGISTRABLE_HOST_THREADS = 1024; | ||||
|     static constexpr size_t NUM_REGISTRABLE_HOST_THREADS = 64; | ||||
|     std::atomic<size_t> num_host_threads{0}; | ||||
|     std::array<std::atomic<std::thread::id>, NUM_REGISTRABLE_HOST_THREADS> | ||||
|         register_host_thread_keys{}; | ||||
| @@ -349,9 +345,6 @@ struct KernelCore::Impl { | ||||
|     std::shared_ptr<Kernel::SharedMemory> irs_shared_mem; | ||||
|     std::shared_ptr<Kernel::SharedMemory> time_shared_mem; | ||||
|  | ||||
|     // Threads used for services | ||||
|     std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads; | ||||
|  | ||||
|     std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{}; | ||||
|     std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{}; | ||||
|     std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{}; | ||||
| @@ -646,16 +639,4 @@ void KernelCore::ExitSVCProfile() { | ||||
|     MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]); | ||||
| } | ||||
|  | ||||
| std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { | ||||
|     auto service_thread = std::make_shared<Kernel::ServiceThread>(*this, 1, name); | ||||
|     impl->service_threads.emplace(service_thread); | ||||
|     return service_thread; | ||||
| } | ||||
|  | ||||
| void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) { | ||||
|     if (auto strong_ptr = service_thread.lock()) { | ||||
|         impl->service_threads.erase(strong_ptr); | ||||
|     } | ||||
| } | ||||
|  | ||||
| } // namespace Kernel | ||||
|   | ||||
| @@ -42,7 +42,6 @@ class Process; | ||||
| class ResourceLimit; | ||||
| class KScheduler; | ||||
| class SharedMemory; | ||||
| class ServiceThread; | ||||
| class Synchronization; | ||||
| class Thread; | ||||
| class TimeManager; | ||||
| @@ -228,22 +227,6 @@ public: | ||||
|  | ||||
|     void ExitSVCProfile(); | ||||
|  | ||||
|     /** | ||||
|      * Creates an HLE service thread, which are used to execute service routines asynchronously. | ||||
|      * While these are allocated per ServerSession, these need to be owned and managed outside of | ||||
|      * ServerSession to avoid a circular dependency. | ||||
|      * @param name String name for the ServerSession creating this thread, used for debug purposes. | ||||
|      * @returns The a weak pointer newly created service thread. | ||||
|      */ | ||||
|     std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name); | ||||
|  | ||||
|     /** | ||||
|      * Releases a HLE service thread, instructing KernelCore to free it. This should be called when | ||||
|      * the ServerSession associated with the thread is destroyed. | ||||
|      * @param service_thread Service thread to release. | ||||
|      */ | ||||
|     void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread); | ||||
|  | ||||
| private: | ||||
|     friend class Object; | ||||
|     friend class Process; | ||||
|   | ||||
| @@ -25,19 +25,19 @@ | ||||
| namespace Kernel { | ||||
|  | ||||
| ServerSession::ServerSession(KernelCore& kernel) : SynchronizationObject{kernel} {} | ||||
|  | ||||
| ServerSession::~ServerSession() { | ||||
|     kernel.ReleaseServiceThread(service_thread); | ||||
| } | ||||
| ServerSession::~ServerSession() = default; | ||||
|  | ||||
| ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kernel, | ||||
|                                                                 std::shared_ptr<Session> parent, | ||||
|                                                                 std::string name) { | ||||
|     std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)}; | ||||
|  | ||||
|     session->request_event = | ||||
|         Core::Timing::CreateEvent(name, [session](std::uintptr_t, std::chrono::nanoseconds) { | ||||
|             session->CompleteSyncRequest(); | ||||
|         }); | ||||
|     session->name = std::move(name); | ||||
|     session->parent = std::move(parent); | ||||
|     session->service_thread = kernel.CreateServiceThread(session->name); | ||||
|  | ||||
|     return MakeResult(std::move(session)); | ||||
| } | ||||
| @@ -142,16 +142,16 @@ ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread, | ||||
|         std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), std::move(thread)); | ||||
|  | ||||
|     context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); | ||||
|  | ||||
|     if (auto strong_ptr = service_thread.lock()) { | ||||
|         strong_ptr->QueueSyncRequest(*this, std::move(context)); | ||||
|         return RESULT_SUCCESS; | ||||
|     } | ||||
|     request_queue.Push(std::move(context)); | ||||
|  | ||||
|     return RESULT_SUCCESS; | ||||
| } | ||||
|  | ||||
| ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) { | ||||
| ResultCode ServerSession::CompleteSyncRequest() { | ||||
|     ASSERT(!request_queue.Empty()); | ||||
|  | ||||
|     auto& context = *request_queue.Front(); | ||||
|  | ||||
|     ResultCode result = RESULT_SUCCESS; | ||||
|     // If the session has been converted to a domain, handle the domain request | ||||
|     if (IsDomain() && context.HasDomainMessageHeader()) { | ||||
| @@ -177,13 +177,18 @@ ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) { | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     request_queue.Pop(); | ||||
|  | ||||
|     return result; | ||||
| } | ||||
|  | ||||
| ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread, | ||||
|                                             Core::Memory::Memory& memory, | ||||
|                                             Core::Timing::CoreTiming& core_timing) { | ||||
|     return QueueSyncRequest(std::move(thread), memory); | ||||
|     const ResultCode result = QueueSyncRequest(std::move(thread), memory); | ||||
|     const auto delay = std::chrono::nanoseconds{kernel.IsMulticore() ? 0 : 20000}; | ||||
|     core_timing.ScheduleEvent(delay, request_event, {}); | ||||
|     return result; | ||||
| } | ||||
|  | ||||
| } // namespace Kernel | ||||
|   | ||||
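Editorial note: per-session ServiceThreads are removed by this commit; requests are now queued on the session and replayed through a core-timing event. A condensed, non-verbatim view of the new path, pieced together from the ServerSession changes above:

```cpp
// Create(): every session gets a timing event that drains its request queue.
session->request_event =
    Core::Timing::CreateEvent(name, [session](std::uintptr_t, std::chrono::nanoseconds) {
        session->CompleteSyncRequest(); // pops request_queue and dispatches the HLE handler
    });

// HandleSyncRequest(): queue the context, then schedule the event. Single-core
// runs get a small delay so the guest thread yields before the reply arrives.
const ResultCode result = QueueSyncRequest(std::move(thread), memory);
const auto delay = std::chrono::nanoseconds{kernel.IsMulticore() ? 0 : 20000};
core_timing.ScheduleEvent(delay, request_event, {});
```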
| @@ -10,7 +10,6 @@ | ||||
| #include <vector> | ||||
|  | ||||
| #include "common/threadsafe_queue.h" | ||||
| #include "core/hle/kernel/service_thread.h" | ||||
| #include "core/hle/kernel/synchronization_object.h" | ||||
| #include "core/hle/result.h" | ||||
|  | ||||
| @@ -44,8 +43,6 @@ class Thread; | ||||
|  * TLS buffer and control is transferred back to it. | ||||
|  */ | ||||
| class ServerSession final : public SynchronizationObject { | ||||
|     friend class ServiceThread; | ||||
|  | ||||
| public: | ||||
|     explicit ServerSession(KernelCore& kernel); | ||||
|     ~ServerSession() override; | ||||
| @@ -135,7 +132,7 @@ private: | ||||
|     ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory); | ||||
|  | ||||
|     /// Completes a sync request from the emulated application. | ||||
|     ResultCode CompleteSyncRequest(HLERequestContext& context); | ||||
|     ResultCode CompleteSyncRequest(); | ||||
|  | ||||
|     /// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an | ||||
|     /// object handle. | ||||
| @@ -166,8 +163,11 @@ private: | ||||
|     /// The name of this session (optional) | ||||
|     std::string name; | ||||
|  | ||||
|     /// Thread to dispatch service requests | ||||
|     std::weak_ptr<ServiceThread> service_thread; | ||||
|     /// Core timing event used to schedule the service request at some point in the future | ||||
|     std::shared_ptr<Core::Timing::EventType> request_event; | ||||
|  | ||||
|     /// Queue of scheduled service requests | ||||
|     Common::MPSCQueue<std::shared_ptr<Kernel::HLERequestContext>> request_queue; | ||||
| }; | ||||
|  | ||||
| } // namespace Kernel | ||||
|   | ||||
| @@ -70,10 +70,8 @@ public: | ||||
|             Kernel::WritableEvent::CreateEventPair(system.Kernel(), "IAudioOutBufferReleased"); | ||||
|  | ||||
|         stream = audio_core.OpenStream(system.CoreTiming(), audio_params.sample_rate, | ||||
|                                        audio_params.channel_count, std::move(unique_name), [this] { | ||||
|                                            const auto guard = LockService(); | ||||
|                                            buffer_event.writable->Signal(); | ||||
|                                        }); | ||||
|                                        audio_params.channel_count, std::move(unique_name), | ||||
|                                        [this] { buffer_event.writable->Signal(); }); | ||||
|     } | ||||
|  | ||||
| private: | ||||
|   | ||||
| @@ -49,16 +49,16 @@ public: | ||||
|  | ||||
|         system_event = | ||||
|             Kernel::WritableEvent::CreateEventPair(system.Kernel(), "IAudioRenderer:SystemEvent"); | ||||
|         renderer = std::make_unique<AudioCore::AudioRenderer>( | ||||
|             system.CoreTiming(), system.Memory(), audren_params, | ||||
|             [this]() { | ||||
|                 const auto guard = LockService(); | ||||
|                 system_event.writable->Signal(); | ||||
|             }, | ||||
|             instance_number); | ||||
|         renderer = std::make_unique<AudioCore::AudioRenderer>(system.CoreTiming(), system.Memory(), | ||||
|                                                               audren_params, system_event.writable, | ||||
|                                                               instance_number); | ||||
|     } | ||||
|  | ||||
| private: | ||||
|     void UpdateAudioCallback() { | ||||
|         system_event.writable->Signal(); | ||||
|     } | ||||
|  | ||||
|     void GetSampleRate(Kernel::HLERequestContext& ctx) { | ||||
|         LOG_DEBUG(Service_Audio, "called"); | ||||
|  | ||||
|   | ||||
| @@ -78,13 +78,11 @@ IAppletResource::IAppletResource(Core::System& system_) | ||||
|     pad_update_event = Core::Timing::CreateEvent( | ||||
|         "HID::UpdatePadCallback", | ||||
|         [this](std::uintptr_t user_data, std::chrono::nanoseconds ns_late) { | ||||
|             const auto guard = LockService(); | ||||
|             UpdateControllers(user_data, ns_late); | ||||
|         }); | ||||
|     motion_update_event = Core::Timing::CreateEvent( | ||||
|         "HID::MotionPadCallback", | ||||
|         [this](std::uintptr_t user_data, std::chrono::nanoseconds ns_late) { | ||||
|             const auto guard = LockService(); | ||||
|             UpdateMotion(user_data, ns_late); | ||||
|         }); | ||||
|  | ||||
|   | ||||
| @@ -31,8 +31,8 @@ public: | ||||
|      * @param output A buffer where the output data will be written to. | ||||
|      * @returns The result code of the ioctl. | ||||
|      */ | ||||
|     virtual NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, | ||||
|                             std::vector<u8>& output) = 0; | ||||
|     virtual NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                             IoctlCtrl& ctrl) = 0; | ||||
|  | ||||
|     /** | ||||
|      * Handles an ioctl2 request. | ||||
| @@ -43,7 +43,8 @@ public: | ||||
|      * @returns The result code of the ioctl. | ||||
|      */ | ||||
|     virtual NvResult Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                             const std::vector<u8>& inline_input, std::vector<u8>& output) = 0; | ||||
|                             const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                             IoctlCtrl& ctrl) = 0; | ||||
|  | ||||
|     /** | ||||
|      * Handles an ioctl3 request. | ||||
| @@ -54,7 +55,7 @@ public: | ||||
|      * @returns The result code of the ioctl. | ||||
|      */ | ||||
|     virtual NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                             std::vector<u8>& inline_output) = 0; | ||||
|                             std::vector<u8>& inline_output, IoctlCtrl& ctrl) = 0; | ||||
|  | ||||
| protected: | ||||
|     Core::System& system; | ||||
|   | ||||
| @@ -18,20 +18,21 @@ nvdisp_disp0::nvdisp_disp0(Core::System& system, std::shared_ptr<nvmap> nvmap_de | ||||
|     : nvdevice(system), nvmap_dev(std::move(nvmap_dev)) {} | ||||
| nvdisp_disp0 ::~nvdisp_disp0() = default; | ||||
|  | ||||
| NvResult nvdisp_disp0::Ioctl1(Ioctl command, const std::vector<u8>& input, | ||||
|                               std::vector<u8>& output) { | ||||
| NvResult nvdisp_disp0::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                               IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|  | ||||
| NvResult nvdisp_disp0::Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                               const std::vector<u8>& inline_input, std::vector<u8>& output) { | ||||
|                               const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                               IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|  | ||||
| NvResult nvdisp_disp0::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                               std::vector<u8>& inline_output) { | ||||
|                               std::vector<u8>& inline_output, IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|   | ||||
| @@ -20,11 +20,13 @@ public: | ||||
|     explicit nvdisp_disp0(Core::System& system, std::shared_ptr<nvmap> nvmap_dev); | ||||
|     ~nvdisp_disp0() override; | ||||
|  | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output) override; | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     std::vector<u8>& inline_output) override; | ||||
|                     std::vector<u8>& inline_output, IoctlCtrl& ctrl) override; | ||||
|  | ||||
|     /// Performs a screen flip, drawing the buffer pointed to by the handle. | ||||
|     void flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height, u32 stride, | ||||
|   | ||||
| @@ -21,8 +21,8 @@ nvhost_as_gpu::nvhost_as_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_ | ||||
|     : nvdevice(system), nvmap_dev(std::move(nvmap_dev)) {} | ||||
| nvhost_as_gpu::~nvhost_as_gpu() = default; | ||||
|  | ||||
| NvResult nvhost_as_gpu::Ioctl1(Ioctl command, const std::vector<u8>& input, | ||||
|                                std::vector<u8>& output) { | ||||
| NvResult nvhost_as_gpu::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                                IoctlCtrl& ctrl) { | ||||
|     switch (command.group) { | ||||
|     case 'A': | ||||
|         switch (command.cmd) { | ||||
| @@ -55,13 +55,14 @@ NvResult nvhost_as_gpu::Ioctl1(Ioctl command, const std::vector<u8>& input, | ||||
| } | ||||
|  | ||||
| NvResult nvhost_as_gpu::Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                                const std::vector<u8>& inline_input, std::vector<u8>& output) { | ||||
|                                const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                                IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|  | ||||
| NvResult nvhost_as_gpu::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                                std::vector<u8>& inline_output) { | ||||
|                                std::vector<u8>& inline_output, IoctlCtrl& ctrl) { | ||||
|     switch (command.group) { | ||||
|     case 'A': | ||||
|         switch (command.cmd) { | ||||
|   | ||||
| @@ -30,11 +30,13 @@ public: | ||||
|     explicit nvhost_as_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev); | ||||
|     ~nvhost_as_gpu() override; | ||||
|  | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output) override; | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     std::vector<u8>& inline_output) override; | ||||
|                     std::vector<u8>& inline_output, IoctlCtrl& ctrl) override; | ||||
|  | ||||
| private: | ||||
|     class BufferMap final { | ||||
|   | ||||
| @@ -20,7 +20,8 @@ nvhost_ctrl::nvhost_ctrl(Core::System& system, EventInterface& events_interface, | ||||
|     : nvdevice(system), events_interface{events_interface}, syncpoint_manager{syncpoint_manager} {} | ||||
| nvhost_ctrl::~nvhost_ctrl() = default; | ||||
|  | ||||
| NvResult nvhost_ctrl::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) { | ||||
| NvResult nvhost_ctrl::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                              IoctlCtrl& ctrl) { | ||||
|     switch (command.group) { | ||||
|     case 0x0: | ||||
|         switch (command.cmd) { | ||||
| @@ -29,9 +30,9 @@ NvResult nvhost_ctrl::Ioctl1(Ioctl command, const std::vector<u8>& input, std::v | ||||
|         case 0x1c: | ||||
|             return IocCtrlClearEventWait(input, output); | ||||
|         case 0x1d: | ||||
|             return IocCtrlEventWait(input, output, false); | ||||
|             return IocCtrlEventWait(input, output, false, ctrl); | ||||
|         case 0x1e: | ||||
|             return IocCtrlEventWait(input, output, true); | ||||
|             return IocCtrlEventWait(input, output, true, ctrl); | ||||
|         case 0x1f: | ||||
|             return IocCtrlEventRegister(input, output); | ||||
|         case 0x20: | ||||
| @@ -47,13 +48,14 @@ NvResult nvhost_ctrl::Ioctl1(Ioctl command, const std::vector<u8>& input, std::v | ||||
| } | ||||
|  | ||||
| NvResult nvhost_ctrl::Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                              const std::vector<u8>& inline_input, std::vector<u8>& output) { | ||||
|                              const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                              IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|  | ||||
| NvResult nvhost_ctrl::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                              std::vector<u8>& inline_outpu) { | ||||
|                              std::vector<u8>& inline_output, IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
| @@ -67,7 +69,7 @@ NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector | ||||
| } | ||||
|  | ||||
| NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                                        bool is_async) { | ||||
|                                        bool is_async, IoctlCtrl& ctrl) { | ||||
|     IocCtrlEventWaitParams params{}; | ||||
|     std::memcpy(¶ms, input.data(), sizeof(params)); | ||||
|     LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_async={}", | ||||
| @@ -139,6 +141,12 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector | ||||
|         params.value |= event_id; | ||||
|         event.event.writable->Clear(); | ||||
|         gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value); | ||||
|         if (!is_async && ctrl.fresh_call) { | ||||
|             ctrl.must_delay = true; | ||||
|             ctrl.timeout = params.timeout; | ||||
|             ctrl.event_id = event_id; | ||||
|             return NvResult::Timeout; | ||||
|         } | ||||
|         std::memcpy(output.data(), ¶ms, sizeof(params)); | ||||
|         return NvResult::Timeout; | ||||
|     } | ||||
|   | ||||
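Editorial note: the block added to IocCtrlEventWait above is the device-side half of the new deferral handshake. Condensed and annotated below; the field names come from the IoctlCtrl struct added later in this commit:

```cpp
// First, synchronous call: instead of blocking, ask the IPC wrapper to park
// the guest thread on the syncpoint event and retry the ioctl later.
if (!is_async && ctrl.fresh_call) {
    ctrl.must_delay = true;        // wrapper should SleepClientThread()
    ctrl.timeout = params.timeout; // how long to wait before waking anyway
    ctrl.event_id = event_id;      // NV event the wrapper will wait on
    return NvResult::Timeout;
}
// Re-issued call (fresh_call == false): fall through to the normal path and
// report the (possibly timed-out) result to the guest.
std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Timeout;
```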
| @@ -18,11 +18,13 @@ public: | ||||
|                          SyncpointManager& syncpoint_manager); | ||||
|     ~nvhost_ctrl() override; | ||||
|  | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output) override; | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     std::vector<u8>& inline_output) override; | ||||
|                     std::vector<u8>& inline_output, IoctlCtrl& ctrl) override; | ||||
|  | ||||
| private: | ||||
|     struct IocSyncptReadParams { | ||||
| @@ -121,7 +123,8 @@ private: | ||||
|     static_assert(sizeof(IocCtrlEventKill) == 8, "IocCtrlEventKill is incorrect size"); | ||||
|  | ||||
|     NvResult NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output); | ||||
|     NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, bool is_async); | ||||
|     NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, bool is_async, | ||||
|                               IoctlCtrl& ctrl); | ||||
|     NvResult IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output); | ||||
|     NvResult IocCtrlEventUnregister(const std::vector<u8>& input, std::vector<u8>& output); | ||||
|     NvResult IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output); | ||||
|   | ||||
| @@ -16,7 +16,7 @@ nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system) : nvdevice(system) {} | ||||
| nvhost_ctrl_gpu::~nvhost_ctrl_gpu() = default; | ||||
|  | ||||
| NvResult nvhost_ctrl_gpu::Ioctl1(Ioctl command, const std::vector<u8>& input, | ||||
|                                  std::vector<u8>& output) { | ||||
|                                  std::vector<u8>& output, IoctlCtrl& ctrl) { | ||||
|     switch (command.group) { | ||||
|     case 'G': | ||||
|         switch (command.cmd) { | ||||
| @@ -48,13 +48,15 @@ NvResult nvhost_ctrl_gpu::Ioctl1(Ioctl command, const std::vector<u8>& input, | ||||
| } | ||||
|  | ||||
| NvResult nvhost_ctrl_gpu::Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                                  const std::vector<u8>& inline_input, std::vector<u8>& output) { | ||||
|                                  const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                                  IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|  | ||||
| NvResult nvhost_ctrl_gpu::Ioctl3(Ioctl command, const std::vector<u8>& input, | ||||
|                                  std::vector<u8>& output, std::vector<u8>& inline_output) { | ||||
|                                  std::vector<u8>& output, std::vector<u8>& inline_output, | ||||
|                                  IoctlCtrl& ctrl) { | ||||
|     switch (command.group) { | ||||
|     case 'G': | ||||
|         switch (command.cmd) { | ||||
|   | ||||
| @@ -16,11 +16,13 @@ public: | ||||
|     explicit nvhost_ctrl_gpu(Core::System& system); | ||||
|     ~nvhost_ctrl_gpu() override; | ||||
|  | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output) override; | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     std::vector<u8>& inline_output) override; | ||||
|                     std::vector<u8>& inline_output, IoctlCtrl& ctrl) override; | ||||
|  | ||||
| private: | ||||
|     struct IoctlGpuCharacteristics { | ||||
|   | ||||
| @@ -23,7 +23,8 @@ nvhost_gpu::nvhost_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev, | ||||
|  | ||||
| nvhost_gpu::~nvhost_gpu() = default; | ||||
|  | ||||
| NvResult nvhost_gpu::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) { | ||||
| NvResult nvhost_gpu::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                             IoctlCtrl& ctrl) { | ||||
|     switch (command.group) { | ||||
|     case 0x0: | ||||
|         switch (command.cmd) { | ||||
| @@ -75,7 +76,8 @@ NvResult nvhost_gpu::Ioctl1(Ioctl command, const std::vector<u8>& input, std::ve | ||||
| }; | ||||
|  | ||||
| NvResult nvhost_gpu::Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                             const std::vector<u8>& inline_input, std::vector<u8>& output) { | ||||
|                             const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                             IoctlCtrl& ctrl) { | ||||
|     switch (command.group) { | ||||
|     case 'H': | ||||
|         switch (command.cmd) { | ||||
| @@ -89,7 +91,7 @@ NvResult nvhost_gpu::Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
| } | ||||
|  | ||||
| NvResult nvhost_gpu::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                             std::vector<u8>& inline_output) { | ||||
|                             std::vector<u8>& inline_output, IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|   | ||||
| @@ -26,11 +26,13 @@ public: | ||||
|                         SyncpointManager& syncpoint_manager); | ||||
|     ~nvhost_gpu() override; | ||||
|  | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output) override; | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     std::vector<u8>& inline_output) override; | ||||
|                     std::vector<u8>& inline_output, IoctlCtrl& ctrl) override; | ||||
|  | ||||
| private: | ||||
|     enum class CtxObjects : u32_le { | ||||
|   | ||||
| @@ -15,8 +15,8 @@ nvhost_nvdec::nvhost_nvdec(Core::System& system, std::shared_ptr<nvmap> nvmap_de | ||||
|     : nvhost_nvdec_common(system, std::move(nvmap_dev)) {} | ||||
| nvhost_nvdec::~nvhost_nvdec() = default; | ||||
|  | ||||
| NvResult nvhost_nvdec::Ioctl1(Ioctl command, const std::vector<u8>& input, | ||||
|                               std::vector<u8>& output) { | ||||
| NvResult nvhost_nvdec::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                               IoctlCtrl& ctrl) { | ||||
|     switch (command.group) { | ||||
|     case 0x0: | ||||
|         switch (command.cmd) { | ||||
| @@ -59,13 +59,14 @@ NvResult nvhost_nvdec::Ioctl1(Ioctl command, const std::vector<u8>& input, | ||||
| } | ||||
|  | ||||
| NvResult nvhost_nvdec::Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                               const std::vector<u8>& inline_input, std::vector<u8>& output) { | ||||
|                               const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                               IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|  | ||||
| NvResult nvhost_nvdec::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                               std::vector<u8>& inline_output) { | ||||
|                               std::vector<u8>& inline_output, IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|   | ||||
| @@ -14,11 +14,13 @@ public: | ||||
|     explicit nvhost_nvdec(Core::System& system, std::shared_ptr<nvmap> nvmap_dev); | ||||
|     ~nvhost_nvdec() override; | ||||
|  | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output) override; | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     std::vector<u8>& inline_output) override; | ||||
|                     std::vector<u8>& inline_output, IoctlCtrl& ctrl) override; | ||||
| }; | ||||
|  | ||||
| } // namespace Service::Nvidia::Devices | ||||
|   | ||||
| @@ -13,8 +13,8 @@ namespace Service::Nvidia::Devices { | ||||
| nvhost_nvjpg::nvhost_nvjpg(Core::System& system) : nvdevice(system) {} | ||||
| nvhost_nvjpg::~nvhost_nvjpg() = default; | ||||
|  | ||||
| NvResult nvhost_nvjpg::Ioctl1(Ioctl command, const std::vector<u8>& input, | ||||
|                               std::vector<u8>& output) { | ||||
| NvResult nvhost_nvjpg::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                               IoctlCtrl& ctrl) { | ||||
|     switch (command.group) { | ||||
|     case 'H': | ||||
|         switch (command.cmd) { | ||||
| @@ -33,13 +33,14 @@ NvResult nvhost_nvjpg::Ioctl1(Ioctl command, const std::vector<u8>& input, | ||||
| } | ||||
|  | ||||
| NvResult nvhost_nvjpg::Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                               const std::vector<u8>& inline_input, std::vector<u8>& output) { | ||||
|                               const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                               IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|  | ||||
| NvResult nvhost_nvjpg::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                               std::vector<u8>& inline_output) { | ||||
|                               std::vector<u8>& inline_output, IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|   | ||||
| @@ -16,11 +16,13 @@ public: | ||||
|     explicit nvhost_nvjpg(Core::System& system); | ||||
|     ~nvhost_nvjpg() override; | ||||
|  | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output) override; | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     std::vector<u8>& inline_output) override; | ||||
|                     std::vector<u8>& inline_output, IoctlCtrl& ctrl) override; | ||||
|  | ||||
| private: | ||||
|     struct IoctlSetNvmapFD { | ||||
|   | ||||
| @@ -16,7 +16,8 @@ nvhost_vic::nvhost_vic(Core::System& system, std::shared_ptr<nvmap> nvmap_dev) | ||||
| } | ||||
| nvhost_vic::~nvhost_vic() = default; | ||||
|  | ||||
| NvResult nvhost_vic::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) { | ||||
| NvResult nvhost_vic::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                             IoctlCtrl& ctrl) { | ||||
|     switch (command.group) { | ||||
|     case 0x0: | ||||
|         switch (command.cmd) { | ||||
| @@ -51,13 +52,14 @@ NvResult nvhost_vic::Ioctl1(Ioctl command, const std::vector<u8>& input, std::ve | ||||
| } | ||||
|  | ||||
| NvResult nvhost_vic::Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                             const std::vector<u8>& inline_input, std::vector<u8>& output) { | ||||
|                             const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                             IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|  | ||||
| NvResult nvhost_vic::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                             std::vector<u8>& inline_output) { | ||||
|                             std::vector<u8>& inline_output, IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|   | ||||
| @@ -14,10 +14,12 @@ public: | ||||
|     explicit nvhost_vic(Core::System& system, std::shared_ptr<nvmap> nvmap_dev); | ||||
|     ~nvhost_vic(); | ||||
|  | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output) override; | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     std::vector<u8>& inline_output) override; | ||||
|                     std::vector<u8>& inline_output, IoctlCtrl& ctrl) override; | ||||
| }; | ||||
| } // namespace Service::Nvidia::Devices | ||||
|   | ||||
| @@ -19,7 +19,8 @@ nvmap::nvmap(Core::System& system) : nvdevice(system) { | ||||
|  | ||||
| nvmap::~nvmap() = default; | ||||
|  | ||||
| NvResult nvmap::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) { | ||||
| NvResult nvmap::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                        IoctlCtrl& ctrl) { | ||||
|     switch (command.group) { | ||||
|     case 0x1: | ||||
|         switch (command.cmd) { | ||||
| @@ -48,13 +49,14 @@ NvResult nvmap::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector< | ||||
| } | ||||
|  | ||||
| NvResult nvmap::Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                        const std::vector<u8>& inline_input, std::vector<u8>& output) { | ||||
|                        const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                        IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|  | ||||
| NvResult nvmap::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                        std::vector<u8>& inline_output) { | ||||
|                        std::vector<u8>& inline_output, IoctlCtrl& ctrl) { | ||||
|     UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw); | ||||
|     return NvResult::NotImplemented; | ||||
| } | ||||
|   | ||||
| @@ -19,11 +19,13 @@ public: | ||||
|     explicit nvmap(Core::System& system); | ||||
|     ~nvmap() override; | ||||
|  | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; | ||||
|     NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl2(Ioctl command, const std::vector<u8>& input, | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output) override; | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                     IoctlCtrl& ctrl) override; | ||||
|     NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                     std::vector<u8>& inline_output) override; | ||||
|                     std::vector<u8>& inline_output, IoctlCtrl& ctrl) override; | ||||
|  | ||||
|     /// Returns the allocated address of an nvmap object given its handle. | ||||
|     VAddr GetObjectAddress(u32 handle) const; | ||||
|   | ||||
| @@ -61,9 +61,32 @@ void NVDRV::Ioctl1(Kernel::HLERequestContext& ctx) { | ||||
|     std::vector<u8> output_buffer(ctx.GetWriteBufferSize(0)); | ||||
|     const auto input_buffer = ctx.ReadBuffer(0); | ||||
|  | ||||
|     const auto nv_result = nvdrv->Ioctl1(fd, command, input_buffer, output_buffer); | ||||
|     if (command.is_out != 0) { | ||||
|         ctx.WriteBuffer(output_buffer); | ||||
|     IoctlCtrl ctrl{}; | ||||
|  | ||||
|     const auto nv_result = nvdrv->Ioctl1(fd, command, input_buffer, output_buffer, ctrl); | ||||
|     if (ctrl.must_delay) { | ||||
|         ctrl.fresh_call = false; | ||||
|         ctx.SleepClientThread( | ||||
|             "NVServices::DelayedResponse", ctrl.timeout, | ||||
|             [=, this](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx_, | ||||
|                       Kernel::ThreadWakeupReason reason) { | ||||
|                 IoctlCtrl ctrl2{ctrl}; | ||||
|                 std::vector<u8> tmp_output = output_buffer; | ||||
|                 const auto nv_result2 = nvdrv->Ioctl1(fd, command, input_buffer, tmp_output, ctrl2); | ||||
|  | ||||
|                 if (command.is_out != 0) { | ||||
|                     ctx.WriteBuffer(tmp_output); | ||||
|                 } | ||||
|  | ||||
|                 IPC::ResponseBuilder rb{ctx_, 3}; | ||||
|                 rb.Push(RESULT_SUCCESS); | ||||
|                 rb.PushEnum(nv_result2); | ||||
|             }, | ||||
|             nvdrv->GetEventWriteable(ctrl.event_id)); | ||||
|     } else { | ||||
|         if (command.is_out != 0) { | ||||
|             ctx.WriteBuffer(output_buffer); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     IPC::ResponseBuilder rb{ctx, 3}; | ||||
| @@ -87,8 +110,36 @@ void NVDRV::Ioctl2(Kernel::HLERequestContext& ctx) { | ||||
|     const auto input_inlined_buffer = ctx.ReadBuffer(1); | ||||
|     std::vector<u8> output_buffer(ctx.GetWriteBufferSize(0)); | ||||
|  | ||||
|     IoctlCtrl ctrl{}; | ||||
|  | ||||
|     const auto nv_result = | ||||
|         nvdrv->Ioctl2(fd, command, input_buffer, input_inlined_buffer, output_buffer); | ||||
|         nvdrv->Ioctl2(fd, command, input_buffer, input_inlined_buffer, output_buffer, ctrl); | ||||
|     if (ctrl.must_delay) { | ||||
|         ctrl.fresh_call = false; | ||||
|         ctx.SleepClientThread( | ||||
|             "NVServices::DelayedResponse", ctrl.timeout, | ||||
|             [=, this](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx_, | ||||
|                       Kernel::ThreadWakeupReason reason) { | ||||
|                 IoctlCtrl ctrl2{ctrl}; | ||||
|                 std::vector<u8> tmp_output = output_buffer; | ||||
|                 const auto nv_result2 = nvdrv->Ioctl2(fd, command, input_buffer, | ||||
|                                                       input_inlined_buffer, tmp_output, ctrl2); | ||||
|  | ||||
|                 if (command.is_out != 0) { | ||||
|                     ctx.WriteBuffer(tmp_output); | ||||
|                 } | ||||
|  | ||||
|                 IPC::ResponseBuilder rb{ctx_, 3}; | ||||
|                 rb.Push(RESULT_SUCCESS); | ||||
|                 rb.PushEnum(nv_result2); | ||||
|             }, | ||||
|             nvdrv->GetEventWriteable(ctrl.event_id)); | ||||
|     } else { | ||||
|         if (command.is_out != 0) { | ||||
|             ctx.WriteBuffer(output_buffer); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     if (command.is_out != 0) { | ||||
|         ctx.WriteBuffer(output_buffer); | ||||
|     } | ||||
| @@ -114,11 +165,36 @@ void NVDRV::Ioctl3(Kernel::HLERequestContext& ctx) { | ||||
|     std::vector<u8> output_buffer(ctx.GetWriteBufferSize(0)); | ||||
|     std::vector<u8> output_buffer_inline(ctx.GetWriteBufferSize(1)); | ||||
|  | ||||
|     IoctlCtrl ctrl{}; | ||||
|     const auto nv_result = | ||||
|         nvdrv->Ioctl3(fd, command, input_buffer, output_buffer, output_buffer_inline); | ||||
|     if (command.is_out != 0) { | ||||
|         ctx.WriteBuffer(output_buffer, 0); | ||||
|         ctx.WriteBuffer(output_buffer_inline, 1); | ||||
|         nvdrv->Ioctl3(fd, command, input_buffer, output_buffer, output_buffer_inline, ctrl); | ||||
|     if (ctrl.must_delay) { | ||||
|         ctrl.fresh_call = false; | ||||
|         ctx.SleepClientThread( | ||||
|             "NVServices::DelayedResponse", ctrl.timeout, | ||||
|             [=, this](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx_, | ||||
|                       Kernel::ThreadWakeupReason reason) { | ||||
|                 IoctlCtrl ctrl2{ctrl}; | ||||
|                 std::vector<u8> tmp_output = output_buffer; | ||||
|                 std::vector<u8> tmp_output2 = output_buffer; | ||||
|                 const auto nv_result2 = | ||||
|                     nvdrv->Ioctl3(fd, command, input_buffer, tmp_output, tmp_output2, ctrl2); | ||||
|  | ||||
|                 if (command.is_out != 0) { | ||||
|                     ctx.WriteBuffer(tmp_output, 0); | ||||
|                     ctx.WriteBuffer(tmp_output2, 1); | ||||
|                 } | ||||
|  | ||||
|                 IPC::ResponseBuilder rb{ctx_, 3}; | ||||
|                 rb.Push(RESULT_SUCCESS); | ||||
|                 rb.PushEnum(nv_result2); | ||||
|             }, | ||||
|             nvdrv->GetEventWriteable(ctrl.event_id)); | ||||
|     } else { | ||||
|         if (command.is_out != 0) { | ||||
|             ctx.WriteBuffer(output_buffer, 0); | ||||
|             ctx.WriteBuffer(output_buffer_inline, 1); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     IPC::ResponseBuilder rb{ctx, 3}; | ||||
|   | ||||
| @@ -97,4 +97,15 @@ union Ioctl { | ||||
|     BitField<31, 1, u32> is_out; | ||||
| }; | ||||
|  | ||||
| struct IoctlCtrl { | ||||
|     // First call done to the service, for services that call themselves again after a call. | ||||
|     bool fresh_call{true}; | ||||
|     // Tells the Ioctl Wrapper that it must delay the IPC response and send the thread to sleep | ||||
|     bool must_delay{}; | ||||
|     // Timeout for the delay | ||||
|     s64 timeout{}; | ||||
|     // NV Event Id | ||||
|     s32 event_id{-1}; | ||||
| }; | ||||
|  | ||||
| } // namespace Service::Nvidia | ||||
|   | ||||
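The IoctlCtrl block above is the contract between a device and the NVDRV IPC wrapper for ioctls that cannot complete immediately: the device sets must_delay together with a timeout and the NV event to wait on, the wrapper parks the guest thread with SleepClientThread, and on wake-up it re-issues the same ioctl with fresh_call cleared (exactly the pattern visible in the Ioctl1/2/3 hunks above). A minimal device-side sketch, with all helper names invented for illustration:

```cpp
// Hypothetical device-side handler (helper names are assumptions, not actual yuzu code);
// it shows how a device fills the IoctlCtrl block to request a delayed IPC response.
NvResult IocWaitExample(std::vector<u8>& output, IoctlCtrl& ctrl) {
    if (ctrl.fresh_call && !EventAlreadySignalled()) {
        ctrl.must_delay = true;         // the wrapper will SleepClientThread and retry
        ctrl.timeout = 10'000'000;      // assumed delay budget, in nanoseconds
        ctrl.event_id = wait_event_id;  // NV event the wrapper passes to SleepClientThread
        return NvResult::Timeout;       // first-pass result; overwritten on the retry
    }
    // Retry path: the wrapper cleared fresh_call, so the event has already fired.
    WriteWaitValue(output);             // assumed helper that fills the output buffer
    return NvResult::Success;
}
```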
| @@ -91,7 +91,7 @@ DeviceFD Module::Open(const std::string& device_name) { | ||||
| } | ||||
|  | ||||
| NvResult Module::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | ||||
|                         std::vector<u8>& output) { | ||||
|                         std::vector<u8>& output, IoctlCtrl& ctrl) { | ||||
|     if (fd < 0) { | ||||
|         LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd); | ||||
|         return NvResult::InvalidState; | ||||
| @@ -104,11 +104,12 @@ NvResult Module::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input | ||||
|         return NvResult::NotImplemented; | ||||
|     } | ||||
|  | ||||
|     return itr->second->Ioctl1(command, input, output); | ||||
|     return itr->second->Ioctl1(command, input, output, ctrl); | ||||
| } | ||||
|  | ||||
| NvResult Module::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | ||||
|                         const std::vector<u8>& inline_input, std::vector<u8>& output) { | ||||
|                         const std::vector<u8>& inline_input, std::vector<u8>& output, | ||||
|                         IoctlCtrl& ctrl) { | ||||
|     if (fd < 0) { | ||||
|         LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd); | ||||
|         return NvResult::InvalidState; | ||||
| @@ -121,11 +122,11 @@ NvResult Module::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input | ||||
|         return NvResult::NotImplemented; | ||||
|     } | ||||
|  | ||||
|     return itr->second->Ioctl2(command, input, inline_input, output); | ||||
|     return itr->second->Ioctl2(command, input, inline_input, output, ctrl); | ||||
| } | ||||
|  | ||||
| NvResult Module::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | ||||
|                         std::vector<u8>& output, std::vector<u8>& inline_output) { | ||||
|                         std::vector<u8>& output, std::vector<u8>& inline_output, IoctlCtrl& ctrl) { | ||||
|     if (fd < 0) { | ||||
|         LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd); | ||||
|         return NvResult::InvalidState; | ||||
| @@ -138,7 +139,7 @@ NvResult Module::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input | ||||
|         return NvResult::NotImplemented; | ||||
|     } | ||||
|  | ||||
|     return itr->second->Ioctl3(command, input, output, inline_output); | ||||
|     return itr->second->Ioctl3(command, input, output, inline_output, ctrl); | ||||
| } | ||||
|  | ||||
| NvResult Module::Close(DeviceFD fd) { | ||||
|   | ||||
| @@ -119,13 +119,13 @@ public: | ||||
|  | ||||
|     /// Sends an ioctl command to the specified file descriptor. | ||||
|     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | ||||
|                     std::vector<u8>& output); | ||||
|                     std::vector<u8>& output, IoctlCtrl& ctrl); | ||||
|  | ||||
|     NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output); | ||||
|                     const std::vector<u8>& inline_input, std::vector<u8>& output, IoctlCtrl& ctrl); | ||||
|  | ||||
|     NvResult Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input, | ||||
|                     std::vector<u8>& output, std::vector<u8>& inline_output); | ||||
|                     std::vector<u8>& output, std::vector<u8>& inline_output, IoctlCtrl& ctrl); | ||||
|  | ||||
|     /// Closes a device file descriptor and returns operation success. | ||||
|     NvResult Close(DeviceFD fd); | ||||
|   | ||||
| @@ -25,12 +25,7 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) | ||||
|     ASSERT(slot < buffer_slots); | ||||
|     LOG_WARNING(Service, "Adding graphics buffer {}", slot); | ||||
|  | ||||
|     { | ||||
|         std::unique_lock lock{queue_mutex}; | ||||
|         free_buffers.push_back(slot); | ||||
|     } | ||||
|     condition.notify_one(); | ||||
|  | ||||
|     free_buffers.push_back(slot); | ||||
|     buffers[slot] = { | ||||
|         .slot = slot, | ||||
|         .status = Buffer::Status::Free, | ||||
| @@ -46,20 +41,10 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) | ||||
|  | ||||
| std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::DequeueBuffer(u32 width, | ||||
|                                                                                        u32 height) { | ||||
|     // Wait for first request before trying to dequeue | ||||
|     { | ||||
|         std::unique_lock lock{queue_mutex}; | ||||
|         condition.wait(lock, [this] { return !free_buffers.empty() || !is_connect; }); | ||||
|     } | ||||
|  | ||||
|     if (!is_connect) { | ||||
|         // Buffer was disconnected while the thread was blocked, this is most likely due to | ||||
|         // emulation being stopped | ||||
|     if (free_buffers.empty()) { | ||||
|         return std::nullopt; | ||||
|     } | ||||
|  | ||||
|     std::unique_lock lock{queue_mutex}; | ||||
|  | ||||
|     auto f_itr = free_buffers.begin(); | ||||
|     auto slot = buffers.size(); | ||||
|  | ||||
| @@ -112,11 +97,7 @@ void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& mult | ||||
|     buffers[slot].multi_fence = multi_fence; | ||||
|     buffers[slot].swap_interval = 0; | ||||
|  | ||||
|     { | ||||
|         std::unique_lock lock{queue_mutex}; | ||||
|         free_buffers.push_back(slot); | ||||
|     } | ||||
|     condition.notify_one(); | ||||
|     free_buffers.push_back(slot); | ||||
|  | ||||
|     buffer_wait_event.writable->Signal(); | ||||
| } | ||||
| @@ -146,28 +127,15 @@ void BufferQueue::ReleaseBuffer(u32 slot) { | ||||
|     ASSERT(buffers[slot].slot == slot); | ||||
|  | ||||
|     buffers[slot].status = Buffer::Status::Free; | ||||
|     { | ||||
|         std::unique_lock lock{queue_mutex}; | ||||
|         free_buffers.push_back(slot); | ||||
|     } | ||||
|     condition.notify_one(); | ||||
|     free_buffers.push_back(slot); | ||||
|  | ||||
|     buffer_wait_event.writable->Signal(); | ||||
| } | ||||
|  | ||||
| void BufferQueue::Connect() { | ||||
|     queue_sequence.clear(); | ||||
|     id = 1; | ||||
|     layer_id = 1; | ||||
|     is_connect = true; | ||||
| } | ||||
|  | ||||
| void BufferQueue::Disconnect() { | ||||
|     buffers.fill({}); | ||||
|     queue_sequence.clear(); | ||||
|     buffer_wait_event.writable->Signal(); | ||||
|     is_connect = false; | ||||
|     condition.notify_one(); | ||||
| } | ||||
|  | ||||
| u32 BufferQueue::Query(QueryType type) { | ||||
|   | ||||
| @@ -4,9 +4,7 @@ | ||||
|  | ||||
| #pragma once | ||||
|  | ||||
| #include <condition_variable> | ||||
| #include <list> | ||||
| #include <mutex> | ||||
| #include <optional> | ||||
| #include <vector> | ||||
|  | ||||
| @@ -101,7 +99,6 @@ public: | ||||
|     void CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& multi_fence); | ||||
|     std::optional<std::reference_wrapper<const Buffer>> AcquireBuffer(); | ||||
|     void ReleaseBuffer(u32 slot); | ||||
|     void Connect(); | ||||
|     void Disconnect(); | ||||
|     u32 Query(QueryType type); | ||||
|  | ||||
| @@ -109,28 +106,18 @@ public: | ||||
|         return id; | ||||
|     } | ||||
|  | ||||
|     bool IsConnected() const { | ||||
|         return is_connect; | ||||
|     } | ||||
|  | ||||
|     std::shared_ptr<Kernel::WritableEvent> GetWritableBufferWaitEvent() const; | ||||
|  | ||||
|     std::shared_ptr<Kernel::ReadableEvent> GetBufferWaitEvent() const; | ||||
|  | ||||
| private: | ||||
|     BufferQueue(const BufferQueue&) = delete; | ||||
|  | ||||
|     u32 id{}; | ||||
|     u64 layer_id{}; | ||||
|     std::atomic_bool is_connect{}; | ||||
|     u32 id; | ||||
|     u64 layer_id; | ||||
|  | ||||
|     std::list<u32> free_buffers; | ||||
|     std::array<Buffer, buffer_slots> buffers; | ||||
|     std::list<u32> queue_sequence; | ||||
|     Kernel::EventPair buffer_wait_event; | ||||
|  | ||||
|     std::mutex queue_mutex; | ||||
|     std::condition_variable condition; | ||||
| }; | ||||
|  | ||||
| } // namespace Service::NVFlinger | ||||
|   | ||||
| @@ -88,10 +88,6 @@ NVFlinger::NVFlinger(Core::System& system) : system(system) { | ||||
| } | ||||
|  | ||||
| NVFlinger::~NVFlinger() { | ||||
|     for (auto& buffer_queue : buffer_queues) { | ||||
|         buffer_queue->Disconnect(); | ||||
|     } | ||||
|  | ||||
|     if (system.IsMulticore()) { | ||||
|         is_running = false; | ||||
|         wait_event->Set(); | ||||
| @@ -108,8 +104,6 @@ void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) { | ||||
| } | ||||
|  | ||||
| std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) { | ||||
|     const auto guard = Lock(); | ||||
|  | ||||
|     LOG_DEBUG(Service, "Opening \"{}\" display", name); | ||||
|  | ||||
|     // TODO(Subv): Currently we only support the Default display. | ||||
| @@ -127,7 +121,6 @@ std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) { | ||||
| } | ||||
|  | ||||
| std::optional<u64> NVFlinger::CreateLayer(u64 display_id) { | ||||
|     const auto guard = Lock(); | ||||
|     auto* const display = FindDisplay(display_id); | ||||
|  | ||||
|     if (display == nullptr) { | ||||
| @@ -136,22 +129,18 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) { | ||||
|  | ||||
|     const u64 layer_id = next_layer_id++; | ||||
|     const u32 buffer_queue_id = next_buffer_queue_id++; | ||||
|     buffer_queues.emplace_back( | ||||
|         std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id)); | ||||
|     display->CreateLayer(layer_id, *buffer_queues.back()); | ||||
|     buffer_queues.emplace_back(system.Kernel(), buffer_queue_id, layer_id); | ||||
|     display->CreateLayer(layer_id, buffer_queues.back()); | ||||
|     return layer_id; | ||||
| } | ||||
|  | ||||
| void NVFlinger::CloseLayer(u64 layer_id) { | ||||
|     const auto guard = Lock(); | ||||
|  | ||||
|     for (auto& display : displays) { | ||||
|         display.CloseLayer(layer_id); | ||||
|     } | ||||
| } | ||||
|  | ||||
| std::optional<u32> NVFlinger::FindBufferQueueId(u64 display_id, u64 layer_id) const { | ||||
|     const auto guard = Lock(); | ||||
|     const auto* const layer = FindLayer(display_id, layer_id); | ||||
|  | ||||
|     if (layer == nullptr) { | ||||
| @@ -162,7 +151,6 @@ std::optional<u32> NVFlinger::FindBufferQueueId(u64 display_id, u64 layer_id) co | ||||
| } | ||||
|  | ||||
| std::shared_ptr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_id) const { | ||||
|     const auto guard = Lock(); | ||||
|     auto* const display = FindDisplay(display_id); | ||||
|  | ||||
|     if (display == nullptr) { | ||||
| @@ -172,16 +160,20 @@ std::shared_ptr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_id) | ||||
|     return display->GetVSyncEvent(); | ||||
| } | ||||
|  | ||||
| BufferQueue* NVFlinger::FindBufferQueue(u32 id) { | ||||
|     const auto guard = Lock(); | ||||
| BufferQueue& NVFlinger::FindBufferQueue(u32 id) { | ||||
|     const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(), | ||||
|                                   [id](const auto& queue) { return queue->GetId() == id; }); | ||||
|                                   [id](const auto& queue) { return queue.GetId() == id; }); | ||||
|  | ||||
|     if (itr == buffer_queues.end()) { | ||||
|         return nullptr; | ||||
|     } | ||||
|     ASSERT(itr != buffer_queues.end()); | ||||
|     return *itr; | ||||
| } | ||||
|  | ||||
|     return itr->get(); | ||||
| const BufferQueue& NVFlinger::FindBufferQueue(u32 id) const { | ||||
|     const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(), | ||||
|                                   [id](const auto& queue) { return queue.GetId() == id; }); | ||||
|  | ||||
|     ASSERT(itr != buffer_queues.end()); | ||||
|     return *itr; | ||||
| } | ||||
|  | ||||
| VI::Display* NVFlinger::FindDisplay(u64 display_id) { | ||||
|   | ||||
| @@ -75,7 +75,10 @@ public: | ||||
|     [[nodiscard]] std::shared_ptr<Kernel::ReadableEvent> FindVsyncEvent(u64 display_id) const; | ||||
|  | ||||
|     /// Obtains a buffer queue identified by the ID. | ||||
|     [[nodiscard]] BufferQueue* FindBufferQueue(u32 id); | ||||
|     [[nodiscard]] BufferQueue& FindBufferQueue(u32 id); | ||||
|  | ||||
|     /// Obtains a buffer queue identified by the ID. | ||||
|     [[nodiscard]] const BufferQueue& FindBufferQueue(u32 id) const; | ||||
|  | ||||
|     /// Performs a composition request to the emulated nvidia GPU and triggers the vsync events when | ||||
|     /// finished. | ||||
| @@ -83,11 +86,11 @@ public: | ||||
|  | ||||
|     [[nodiscard]] s64 GetNextTicks() const; | ||||
|  | ||||
| private: | ||||
|     [[nodiscard]] std::unique_lock<std::mutex> Lock() const { | ||||
|         return std::unique_lock{*guard}; | ||||
|     } | ||||
|  | ||||
| private: | ||||
|     /// Finds the display identified by the specified ID. | ||||
|     [[nodiscard]] VI::Display* FindDisplay(u64 display_id); | ||||
|  | ||||
| @@ -107,7 +110,7 @@ private: | ||||
|     std::shared_ptr<Nvidia::Module> nvdrv; | ||||
|  | ||||
|     std::vector<VI::Display> displays; | ||||
|     std::vector<std::unique_ptr<BufferQueue>> buffer_queues; | ||||
|     std::vector<BufferQueue> buffer_queues; | ||||
|  | ||||
|     /// Id to use for the next layer that is created, this counter is shared among all displays. | ||||
|     u64 next_layer_id = 1; | ||||
|   | ||||
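Because FindBufferQueue now returns a reference and asserts instead of returning nullptr, and the per-call locking inside NVFlinger has been removed, callers are expected to hold the NVFlinger lock themselves for the duration of the access, as the VI transaction handler later in this commit does. A short sketch of that calling pattern, under that assumption:

```cpp
// Caller-side pattern assumed by this change (mirrors the VI code further down).
const auto guard = nv_flinger.Lock();                  // Lock() now sits above "private:"
auto& buffer_queue = nv_flinger.FindBufferQueue(id);   // asserts if the id is unknown

if (const auto slot = buffer_queue.DequeueBuffer(width, height)) {
    // A free buffer was available: slot->first is the slot index, slot->second the fences.
} else {
    // Queue is empty: sleep the guest thread and retry from the wake-up callback,
    // e.g. via ctx.SleepClientThread(..., buffer_queue.GetWritableBufferWaitEvent()).
}
```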
| @@ -95,14 +95,9 @@ ServiceFrameworkBase::ServiceFrameworkBase(Core::System& system_, const char* se | ||||
|     : system{system_}, service_name{service_name_}, max_sessions{max_sessions_}, | ||||
|       handler_invoker{handler_invoker_} {} | ||||
|  | ||||
| ServiceFrameworkBase::~ServiceFrameworkBase() { | ||||
|     // Wait for other threads to release access before destroying | ||||
|     const auto guard = LockService(); | ||||
| } | ||||
| ServiceFrameworkBase::~ServiceFrameworkBase() = default; | ||||
|  | ||||
| void ServiceFrameworkBase::InstallAsService(SM::ServiceManager& service_manager) { | ||||
|     const auto guard = LockService(); | ||||
|  | ||||
|     ASSERT(!port_installed); | ||||
|  | ||||
|     auto port = service_manager.RegisterService(service_name, max_sessions).Unwrap(); | ||||
| @@ -111,8 +106,6 @@ void ServiceFrameworkBase::InstallAsService(SM::ServiceManager& service_manager) | ||||
| } | ||||
|  | ||||
| void ServiceFrameworkBase::InstallAsNamedPort(Kernel::KernelCore& kernel) { | ||||
|     const auto guard = LockService(); | ||||
|  | ||||
|     ASSERT(!port_installed); | ||||
|  | ||||
|     auto [server_port, client_port] = | ||||
| @@ -122,6 +115,17 @@ void ServiceFrameworkBase::InstallAsNamedPort(Kernel::KernelCore& kernel) { | ||||
|     port_installed = true; | ||||
| } | ||||
|  | ||||
| std::shared_ptr<Kernel::ClientPort> ServiceFrameworkBase::CreatePort(Kernel::KernelCore& kernel) { | ||||
|     ASSERT(!port_installed); | ||||
|  | ||||
|     auto [server_port, client_port] = | ||||
|         Kernel::ServerPort::CreatePortPair(kernel, max_sessions, service_name); | ||||
|     auto port = MakeResult(std::move(server_port)).Unwrap(); | ||||
|     port->SetHleHandler(shared_from_this()); | ||||
|     port_installed = true; | ||||
|     return client_port; | ||||
| } | ||||
|  | ||||
| void ServiceFrameworkBase::RegisterHandlersBase(const FunctionInfoBase* functions, std::size_t n) { | ||||
|     handlers.reserve(handlers.size() + n); | ||||
|     for (std::size_t i = 0; i < n; ++i) { | ||||
| @@ -160,8 +164,6 @@ void ServiceFrameworkBase::InvokeRequest(Kernel::HLERequestContext& ctx) { | ||||
| } | ||||
|  | ||||
| ResultCode ServiceFrameworkBase::HandleSyncRequest(Kernel::HLERequestContext& context) { | ||||
|     const auto guard = LockService(); | ||||
|  | ||||
|     switch (context.GetCommandType()) { | ||||
|     case IPC::CommandType::Close: { | ||||
|         IPC::ResponseBuilder rb{context, 2}; | ||||
| @@ -182,11 +184,7 @@ ResultCode ServiceFrameworkBase::HandleSyncRequest(Kernel::HLERequestContext& co | ||||
|         UNIMPLEMENTED_MSG("command_type={}", context.GetCommandType()); | ||||
|     } | ||||
|  | ||||
|     // If emulation was shutdown, we are closing service threads, do not write the response back to | ||||
|     // memory that may be shutting down as well. | ||||
|     if (system.IsPoweredOn()) { | ||||
|         context.WriteToOutgoingCommandBuffer(context.GetThread()); | ||||
|     } | ||||
|     context.WriteToOutgoingCommandBuffer(context.GetThread()); | ||||
|  | ||||
|     return RESULT_SUCCESS; | ||||
| } | ||||
|   | ||||
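CreatePort mirrors InstallAsNamedPort but returns the client endpoint to the caller instead of registering the port in the kernel's named-port table, so a service can be wired up directly. A hedged usage sketch (this exact call site is not part of the excerpt; shared ownership matters because CreatePort relies on shared_from_this):

```cpp
// Hypothetical wiring (not shown in this commit): create an unregistered port for a service.
auto bsd = std::make_shared<Service::Sockets::BSD>(system, "bsd:u");
std::shared_ptr<Kernel::ClientPort> client_port = bsd->CreatePort(system.Kernel());
// client_port can now be handed to whichever component needs to open sessions on the service.
```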
| @@ -5,11 +5,9 @@ | ||||
| #pragma once | ||||
|  | ||||
| #include <cstddef> | ||||
| #include <mutex> | ||||
| #include <string> | ||||
| #include <boost/container/flat_map.hpp> | ||||
| #include "common/common_types.h" | ||||
| #include "common/spin_lock.h" | ||||
| #include "core/hle/kernel/hle_ipc.h" | ||||
| #include "core/hle/kernel/object.h" | ||||
|  | ||||
| @@ -70,9 +68,11 @@ public: | ||||
|     void InstallAsService(SM::ServiceManager& service_manager); | ||||
|     /// Creates a port pair and registers it on the kernel's global port registry. | ||||
|     void InstallAsNamedPort(Kernel::KernelCore& kernel); | ||||
|     /// Invokes a service request routine. | ||||
|     /// Creates and returns an unregistered port for the service. | ||||
|     std::shared_ptr<Kernel::ClientPort> CreatePort(Kernel::KernelCore& kernel); | ||||
|  | ||||
|     void InvokeRequest(Kernel::HLERequestContext& ctx); | ||||
|     /// Handles a synchronization request for the service. | ||||
|  | ||||
|     ResultCode HandleSyncRequest(Kernel::HLERequestContext& context) override; | ||||
|  | ||||
| protected: | ||||
| @@ -80,11 +80,6 @@ protected: | ||||
|     template <typename Self> | ||||
|     using HandlerFnP = void (Self::*)(Kernel::HLERequestContext&); | ||||
|  | ||||
|     /// Used to gain exclusive access to the service members, e.g. from CoreTiming thread. | ||||
|     [[nodiscard]] std::scoped_lock<Common::SpinLock> LockService() { | ||||
|         return std::scoped_lock{lock_service}; | ||||
|     } | ||||
|  | ||||
|     /// System context that the service operates under. | ||||
|     Core::System& system; | ||||
|  | ||||
| @@ -120,9 +115,6 @@ private: | ||||
|     /// Function used to safely up-cast pointers to the derived class before invoking a handler. | ||||
|     InvokerFn* handler_invoker; | ||||
|     boost::container::flat_map<u32, FunctionInfoBase> handlers; | ||||
|  | ||||
|     /// Used to gain exclusive access to the service members, e.g. from CoreTiming thread. | ||||
|     Common::SpinLock lock_service; | ||||
| }; | ||||
|  | ||||
| /** | ||||
|   | ||||
							
								
								
									
src/core/hle/service/sockets/blocking_worker.h (new executable file, 161 lines added)
							| @@ -0,0 +1,161 @@ | ||||
| // Copyright 2020 yuzu emulator team | ||||
| // Licensed under GPLv2 or any later version | ||||
| // Refer to the license.txt file included. | ||||
|  | ||||
| #pragma once | ||||
|  | ||||
| #include <atomic> | ||||
| #include <memory> | ||||
| #include <string> | ||||
| #include <string_view> | ||||
| #include <thread> | ||||
| #include <variant> | ||||
| #include <vector> | ||||
|  | ||||
| #include <fmt/format.h> | ||||
|  | ||||
| #include "common/assert.h" | ||||
| #include "common/microprofile.h" | ||||
| #include "common/thread.h" | ||||
| #include "core/core.h" | ||||
| #include "core/hle/kernel/hle_ipc.h" | ||||
| #include "core/hle/kernel/kernel.h" | ||||
| #include "core/hle/kernel/thread.h" | ||||
| #include "core/hle/kernel/writable_event.h" | ||||
|  | ||||
| namespace Service::Sockets { | ||||
|  | ||||
| /** | ||||
|  * Worker abstraction to execute blocking calls on host without blocking the guest thread | ||||
|  * | ||||
|  * @tparam Service  Service where the work is executed | ||||
|  * @tparam Types Types of work to execute | ||||
|  */ | ||||
| template <class Service, class... Types> | ||||
| class BlockingWorker { | ||||
|     using This = BlockingWorker<Service, Types...>; | ||||
|     using WorkVariant = std::variant<std::monostate, Types...>; | ||||
|  | ||||
| public: | ||||
|     /// Create a new worker | ||||
|     static std::unique_ptr<This> Create(Core::System& system, Service* service, | ||||
|                                         std::string_view name) { | ||||
|         return std::unique_ptr<This>(new This(system, service, name)); | ||||
|     } | ||||
|  | ||||
|     ~BlockingWorker() { | ||||
|         while (!is_available.load(std::memory_order_relaxed)) { | ||||
|             // Busy wait until work is finished | ||||
|             std::this_thread::yield(); | ||||
|         } | ||||
|         // Monostate means to exit the thread | ||||
|         work = std::monostate{}; | ||||
|         work_event.Set(); | ||||
|         thread.join(); | ||||
|     } | ||||
|  | ||||
|     /** | ||||
|      * Try to capture the worker so that work can be sent to it | ||||
|      * @returns True when the worker has been successfully captured | ||||
|      */ | ||||
|     bool TryCapture() { | ||||
|         bool expected = true; | ||||
|         return is_available.compare_exchange_weak(expected, false, std::memory_order_relaxed, | ||||
|                                                   std::memory_order_relaxed); | ||||
|     } | ||||
|  | ||||
|     /** | ||||
|      * Send work to this worker abstraction | ||||
|      * @see TryCapture must be called before attempting to call this function | ||||
|      */ | ||||
|     template <class Work> | ||||
|     void SendWork(Work new_work) { | ||||
|         ASSERT_MSG(!is_available, "Trying to send work on a worker that's not captured"); | ||||
|         work = std::move(new_work); | ||||
|         work_event.Set(); | ||||
|     } | ||||
|  | ||||
|     /// Generate a callback for @see SleepClientThread | ||||
|     template <class Work> | ||||
|     auto Callback() { | ||||
|         return [this](std::shared_ptr<Kernel::Thread>, Kernel::HLERequestContext& ctx, | ||||
|                       Kernel::ThreadWakeupReason reason) { | ||||
|             ASSERT(reason == Kernel::ThreadWakeupReason::Signal); | ||||
|             std::get<Work>(work).Response(ctx); | ||||
|             is_available.store(true); | ||||
|         }; | ||||
|     } | ||||
|  | ||||
|     /// Get kernel event that will be signalled by the worker when the host operation finishes | ||||
|     std::shared_ptr<Kernel::WritableEvent> KernelEvent() const { | ||||
|         return kernel_event; | ||||
|     } | ||||
|  | ||||
| private: | ||||
|     explicit BlockingWorker(Core::System& system, Service* service, std::string_view name) { | ||||
|         auto pair = Kernel::WritableEvent::CreateEventPair(system.Kernel(), std::string(name)); | ||||
|         kernel_event = std::move(pair.writable); | ||||
|         thread = std::thread([this, &system, service, name] { Run(system, service, name); }); | ||||
|     } | ||||
|  | ||||
|     void Run(Core::System& system, Service* service, std::string_view name) { | ||||
|         system.RegisterHostThread(); | ||||
|  | ||||
|         const std::string thread_name = fmt::format("yuzu:{}", name); | ||||
|         MicroProfileOnThreadCreate(thread_name.c_str()); | ||||
|         Common::SetCurrentThreadName(thread_name.c_str()); | ||||
|  | ||||
|         bool keep_running = true; | ||||
|         while (keep_running) { | ||||
|             work_event.Wait(); | ||||
|  | ||||
|             const auto visit_fn = [service, &keep_running]<typename T>(T&& w) { | ||||
|                 if constexpr (std::is_same_v<std::decay_t<T>, std::monostate>) { | ||||
|                     keep_running = false; | ||||
|                 } else { | ||||
|                     w.Execute(service); | ||||
|                 } | ||||
|             }; | ||||
|             std::visit(visit_fn, work); | ||||
|  | ||||
|             kernel_event->Signal(); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     std::thread thread; | ||||
|     WorkVariant work; | ||||
|     Common::Event work_event; | ||||
|     std::shared_ptr<Kernel::WritableEvent> kernel_event; | ||||
|     std::atomic_bool is_available{true}; | ||||
| }; | ||||
|  | ||||
| template <class Service, class... Types> | ||||
| class BlockingWorkerPool { | ||||
|     using Worker = BlockingWorker<Service, Types...>; | ||||
|  | ||||
| public: | ||||
|     explicit BlockingWorkerPool(Core::System& system_, Service* service_) | ||||
|         : system{system_}, service{service_} {} | ||||
|  | ||||
|     /// Returns a captured worker thread, creating new ones if necessary | ||||
|     Worker* CaptureWorker() { | ||||
|         for (auto& worker : workers) { | ||||
|             if (worker->TryCapture()) { | ||||
|                 return worker.get(); | ||||
|             } | ||||
|         } | ||||
|         auto new_worker = Worker::Create(system, service, fmt::format("BSD:{}", workers.size())); | ||||
|         [[maybe_unused]] const bool success = new_worker->TryCapture(); | ||||
|         ASSERT(success); | ||||
|  | ||||
|         return workers.emplace_back(std::move(new_worker)).get(); | ||||
|     } | ||||
|  | ||||
| private: | ||||
|     Core::System& system; | ||||
|     Service* const service; | ||||
|  | ||||
|     std::vector<std::unique_ptr<Worker>> workers; | ||||
| }; | ||||
|  | ||||
| } // namespace Service::Sockets | ||||
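BlockingWorker only requires that each work type provide Execute(Service*), run on the worker thread, and Response(Kernel::HLERequestContext&), run from the SleepClientThread callback after the kernel event fires. A sketch of the shape such a work item takes, with the member and helper names invented here (the real PollWork, AcceptWork, and friends live in the BSD service):

```cpp
// Illustrative work item (field and helper names are assumptions, not yuzu's actual types).
struct ExampleSendWork {
    s32 fd;
    std::vector<u8> message;
    s32 ret{};
    Errno bsd_errno{};

    void Execute(BSD* bsd) {
        // Runs on the worker thread and may block on the host socket.
        std::tie(ret, bsd_errno) = bsd->SendImpl(fd, /*flags=*/0, message); // assumed helper
    }

    void Response(Kernel::HLERequestContext& ctx) {
        // Runs when the guest thread wakes up; writes the IPC reply.
        IPC::ResponseBuilder rb{ctx, 4};
        rb.Push(RESULT_SUCCESS);
        rb.Push<s32>(ret);
        rb.PushEnum(bsd_errno);
    }
};
```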
| @@ -178,12 +178,13 @@ void BSD::Poll(Kernel::HLERequestContext& ctx) { | ||||
|  | ||||
|     LOG_DEBUG(Service, "called. nfds={} timeout={}", nfds, timeout); | ||||
|  | ||||
|     ExecuteWork(ctx, PollWork{ | ||||
|                          .nfds = nfds, | ||||
|                          .timeout = timeout, | ||||
|                          .read_buffer = ctx.ReadBuffer(), | ||||
|                          .write_buffer = std::vector<u8>(ctx.GetWriteBufferSize()), | ||||
|                      }); | ||||
|     ExecuteWork(ctx, "BSD:Poll", timeout != 0, | ||||
|                 PollWork{ | ||||
|                     .nfds = nfds, | ||||
|                     .timeout = timeout, | ||||
|                     .read_buffer = ctx.ReadBuffer(), | ||||
|                     .write_buffer = std::vector<u8>(ctx.GetWriteBufferSize()), | ||||
|                 }); | ||||
| } | ||||
|  | ||||
| void BSD::Accept(Kernel::HLERequestContext& ctx) { | ||||
| @@ -192,10 +193,11 @@ void BSD::Accept(Kernel::HLERequestContext& ctx) { | ||||
|  | ||||
|     LOG_DEBUG(Service, "called. fd={}", fd); | ||||
|  | ||||
|     ExecuteWork(ctx, AcceptWork{ | ||||
|                          .fd = fd, | ||||
|                          .write_buffer = std::vector<u8>(ctx.GetWriteBufferSize()), | ||||
|                      }); | ||||
|     ExecuteWork(ctx, "BSD:Accept", IsBlockingSocket(fd), | ||||
|                 AcceptWork{ | ||||
|                     .fd = fd, | ||||
|                     .write_buffer = std::vector<u8>(ctx.GetWriteBufferSize()), | ||||
|                 }); | ||||
| } | ||||
|  | ||||
| void BSD::Bind(Kernel::HLERequestContext& ctx) { | ||||
| @@ -213,10 +215,11 @@ void BSD::Connect(Kernel::HLERequestContext& ctx) { | ||||
|  | ||||
|     LOG_DEBUG(Service, "called. fd={} addrlen={}", fd, ctx.GetReadBufferSize()); | ||||
|  | ||||
|     ExecuteWork(ctx, ConnectWork{ | ||||
|                          .fd = fd, | ||||
|                          .addr = ctx.ReadBuffer(), | ||||
|                      }); | ||||
|     ExecuteWork(ctx, "BSD:Connect", IsBlockingSocket(fd), | ||||
|                 ConnectWork{ | ||||
|                     .fd = fd, | ||||
|                     .addr = ctx.ReadBuffer(), | ||||
|                 }); | ||||
| } | ||||
|  | ||||
| void BSD::GetPeerName(Kernel::HLERequestContext& ctx) { | ||||
| @@ -324,11 +327,12 @@ void BSD::Recv(Kernel::HLERequestContext& ctx) { | ||||
|  | ||||
|     LOG_DEBUG(Service, "called. fd={} flags=0x{:x} len={}", fd, flags, ctx.GetWriteBufferSize()); | ||||
|  | ||||
|     ExecuteWork(ctx, RecvWork{ | ||||
|                          .fd = fd, | ||||
|                          .flags = flags, | ||||
|                          .message = std::vector<u8>(ctx.GetWriteBufferSize()), | ||||
|                      }); | ||||
|     ExecuteWork(ctx, "BSD:Recv", IsBlockingSocket(fd), | ||||
|                 RecvWork{ | ||||
|                     .fd = fd, | ||||
|                     .flags = flags, | ||||
|                     .message = std::vector<u8>(ctx.GetWriteBufferSize()), | ||||
|                 }); | ||||
| } | ||||
|  | ||||
| void BSD::RecvFrom(Kernel::HLERequestContext& ctx) { | ||||
| @@ -340,12 +344,13 @@ void BSD::RecvFrom(Kernel::HLERequestContext& ctx) { | ||||
|     LOG_DEBUG(Service, "called. fd={} flags=0x{:x} len={} addrlen={}", fd, flags, | ||||
|               ctx.GetWriteBufferSize(0), ctx.GetWriteBufferSize(1)); | ||||
|  | ||||
|     ExecuteWork(ctx, RecvFromWork{ | ||||
|                          .fd = fd, | ||||
|                          .flags = flags, | ||||
|                          .message = std::vector<u8>(ctx.GetWriteBufferSize(0)), | ||||
|                          .addr = std::vector<u8>(ctx.GetWriteBufferSize(1)), | ||||
|                      }); | ||||
|     ExecuteWork(ctx, "BSD:RecvFrom", IsBlockingSocket(fd), | ||||
|                 RecvFromWork{ | ||||
|                     .fd = fd, | ||||
|                     .flags = flags, | ||||
|                     .message = std::vector<u8>(ctx.GetWriteBufferSize(0)), | ||||
|                     .addr = std::vector<u8>(ctx.GetWriteBufferSize(1)), | ||||
|                 }); | ||||
| } | ||||
|  | ||||
| void BSD::Send(Kernel::HLERequestContext& ctx) { | ||||
| @@ -356,11 +361,12 @@ void BSD::Send(Kernel::HLERequestContext& ctx) { | ||||
|  | ||||
|     LOG_DEBUG(Service, "called. fd={} flags=0x{:x} len={}", fd, flags, ctx.GetReadBufferSize()); | ||||
|  | ||||
|     ExecuteWork(ctx, SendWork{ | ||||
|                          .fd = fd, | ||||
|                          .flags = flags, | ||||
|                          .message = ctx.ReadBuffer(), | ||||
|                      }); | ||||
|     ExecuteWork(ctx, "BSD:Send", IsBlockingSocket(fd), | ||||
|                 SendWork{ | ||||
|                     .fd = fd, | ||||
|                     .flags = flags, | ||||
|                     .message = ctx.ReadBuffer(), | ||||
|                 }); | ||||
| } | ||||
|  | ||||
| void BSD::SendTo(Kernel::HLERequestContext& ctx) { | ||||
| @@ -371,12 +377,13 @@ void BSD::SendTo(Kernel::HLERequestContext& ctx) { | ||||
|     LOG_DEBUG(Service, "called. fd={} flags=0x{} len={} addrlen={}", fd, flags, | ||||
|               ctx.GetReadBufferSize(0), ctx.GetReadBufferSize(1)); | ||||
|  | ||||
|     ExecuteWork(ctx, SendToWork{ | ||||
|                          .fd = fd, | ||||
|                          .flags = flags, | ||||
|                          .message = ctx.ReadBuffer(0), | ||||
|                          .addr = ctx.ReadBuffer(1), | ||||
|                      }); | ||||
|     ExecuteWork(ctx, "BSD:SendTo", IsBlockingSocket(fd), | ||||
|                 SendToWork{ | ||||
|                     .fd = fd, | ||||
|                     .flags = flags, | ||||
|                     .message = ctx.ReadBuffer(0), | ||||
|                     .addr = ctx.ReadBuffer(1), | ||||
|                 }); | ||||
| } | ||||
|  | ||||
| void BSD::Write(Kernel::HLERequestContext& ctx) { | ||||
| @@ -385,11 +392,12 @@ void BSD::Write(Kernel::HLERequestContext& ctx) { | ||||
|  | ||||
|     LOG_DEBUG(Service, "called. fd={} len={}", fd, ctx.GetReadBufferSize()); | ||||
|  | ||||
|     ExecuteWork(ctx, SendWork{ | ||||
|                          .fd = fd, | ||||
|                          .flags = 0, | ||||
|                          .message = ctx.ReadBuffer(), | ||||
|                      }); | ||||
|     ExecuteWork(ctx, "BSD:Write", IsBlockingSocket(fd), | ||||
|                 SendWork{ | ||||
|                     .fd = fd, | ||||
|                     .flags = 0, | ||||
|                     .message = ctx.ReadBuffer(), | ||||
|                 }); | ||||
| } | ||||
|  | ||||
| void BSD::Close(Kernel::HLERequestContext& ctx) { | ||||
| @@ -402,9 +410,24 @@ void BSD::Close(Kernel::HLERequestContext& ctx) { | ||||
| } | ||||
|  | ||||
| template <typename Work> | ||||
| void BSD::ExecuteWork(Kernel::HLERequestContext& ctx, Work work) { | ||||
|     work.Execute(this); | ||||
| void BSD::ExecuteWork(Kernel::HLERequestContext& ctx, std::string_view sleep_reason, | ||||
|                       bool is_blocking, Work work) { | ||||
|     if (!is_blocking) { | ||||
|         work.Execute(this); | ||||
|         work.Response(ctx); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     // Signal a dummy response to make IPC validation happy | ||||
|     // This will be overwritten by the SleepClientThread callback | ||||
|     work.Response(ctx); | ||||
|  | ||||
|     auto worker = worker_pool.CaptureWorker(); | ||||
|  | ||||
|     ctx.SleepClientThread(std::string(sleep_reason), std::numeric_limits<u64>::max(), | ||||
|                           worker->Callback<Work>(), worker->KernelEvent()); | ||||
|  | ||||
|     worker->SendWork(std::move(work)); | ||||
| } | ||||
|  | ||||
| std::pair<s32, Errno> BSD::SocketImpl(Domain domain, Type type, Protocol protocol) { | ||||
| @@ -784,6 +807,18 @@ bool BSD::IsFileDescriptorValid(s32 fd) const noexcept { | ||||
|     return true; | ||||
| } | ||||
|  | ||||
| bool BSD::IsBlockingSocket(s32 fd) const noexcept { | ||||
|     // Report invalid sockets as non-blocking | ||||
|     // This way we avoid using a worker thread, as the call will fail without blocking the host | ||||
|     if (fd > static_cast<s32>(MAX_FD) || fd < 0) { | ||||
|         return false; | ||||
|     } | ||||
|     if (!file_descriptors[fd]) { | ||||
|         return false; | ||||
|     } | ||||
|     return (file_descriptors[fd]->flags & FLAG_O_NONBLOCK) != 0; | ||||
| } | ||||
|  | ||||
| void BSD::BuildErrnoResponse(Kernel::HLERequestContext& ctx, Errno bsd_errno) const noexcept { | ||||
|     IPC::ResponseBuilder rb{ctx, 4}; | ||||
|  | ||||
| @@ -792,7 +827,8 @@ void BSD::BuildErrnoResponse(Kernel::HLERequestContext& ctx, Errno bsd_errno) co | ||||
|     rb.PushEnum(bsd_errno); | ||||
| } | ||||
|  | ||||
| BSD::BSD(Core::System& system_, const char* name) : ServiceFramework{system_, name} { | ||||
| BSD::BSD(Core::System& system_, const char* name) | ||||
|     : ServiceFramework{system_, name}, worker_pool{system_, this} { | ||||
|     // clang-format off | ||||
|     static const FunctionInfo functions[] = { | ||||
|         {0, &BSD::RegisterClient, "RegisterClient"}, | ||||
|   | ||||
| @@ -11,6 +11,7 @@ | ||||
| #include "common/common_types.h" | ||||
| #include "core/hle/kernel/hle_ipc.h" | ||||
| #include "core/hle/service/service.h" | ||||
| #include "core/hle/service/sockets/blocking_worker.h" | ||||
| #include "core/hle/service/sockets/sockets.h" | ||||
|  | ||||
| namespace Core { | ||||
| @@ -137,7 +138,8 @@ private: | ||||
|     void Close(Kernel::HLERequestContext& ctx); | ||||
|  | ||||
|     template <typename Work> | ||||
|     void ExecuteWork(Kernel::HLERequestContext& ctx, Work work); | ||||
|     void ExecuteWork(Kernel::HLERequestContext& ctx, std::string_view sleep_reason, | ||||
|                      bool is_blocking, Work work); | ||||
|  | ||||
|     std::pair<s32, Errno> SocketImpl(Domain domain, Type type, Protocol protocol); | ||||
|     std::pair<s32, Errno> PollImpl(std::vector<u8>& write_buffer, std::vector<u8> read_buffer, | ||||
| @@ -161,10 +163,15 @@ private: | ||||
|  | ||||
|     s32 FindFreeFileDescriptorHandle() noexcept; | ||||
|     bool IsFileDescriptorValid(s32 fd) const noexcept; | ||||
|     bool IsBlockingSocket(s32 fd) const noexcept; | ||||
|  | ||||
|     void BuildErrnoResponse(Kernel::HLERequestContext& ctx, Errno bsd_errno) const noexcept; | ||||
|  | ||||
|     std::array<std::optional<FileDescriptor>, MAX_FD> file_descriptors; | ||||
|  | ||||
|     BlockingWorkerPool<BSD, PollWork, AcceptWork, ConnectWork, RecvWork, RecvFromWork, SendWork, | ||||
|                        SendToWork> | ||||
|         worker_pool; | ||||
| }; | ||||
|  | ||||
| class BSDCFG final : public ServiceFramework<BSDCFG> { | ||||
|   | ||||
| @@ -536,7 +536,8 @@ private: | ||||
|         LOG_DEBUG(Service_VI, "called. id=0x{:08X} transaction={:X}, flags=0x{:08X}", id, | ||||
|                   transaction, flags); | ||||
|  | ||||
|         auto& buffer_queue = *nv_flinger.FindBufferQueue(id); | ||||
|         const auto guard = nv_flinger.Lock(); | ||||
|         auto& buffer_queue = nv_flinger.FindBufferQueue(id); | ||||
|  | ||||
|         switch (transaction) { | ||||
|         case TransactionId::Connect: { | ||||
| @@ -546,9 +547,6 @@ private: | ||||
|                                  Settings::values.resolution_factor.GetValue()), | ||||
|                 static_cast<u32>(static_cast<u32>(DisplayResolution::UndockedHeight) * | ||||
|                                  Settings::values.resolution_factor.GetValue())}; | ||||
|  | ||||
|             buffer_queue.Connect(); | ||||
|  | ||||
|             ctx.WriteBuffer(response.Serialize()); | ||||
|             break; | ||||
|         } | ||||
| @@ -565,25 +563,40 @@ private: | ||||
|             IGBPDequeueBufferRequestParcel request{ctx.ReadBuffer()}; | ||||
|             const u32 width{request.data.width}; | ||||
|             const u32 height{request.data.height}; | ||||
|             auto result = buffer_queue.DequeueBuffer(width, height); | ||||
|  | ||||
|             do { | ||||
|                 if (auto result = buffer_queue.DequeueBuffer(width, height); result) { | ||||
|                     // Buffer is available | ||||
|                     IGBPDequeueBufferResponseParcel response{result->first, *result->second}; | ||||
|                     ctx.WriteBuffer(response.Serialize()); | ||||
|                     break; | ||||
|                 } | ||||
|             } while (buffer_queue.IsConnected()); | ||||
|             if (result) { | ||||
|                 // Buffer is available | ||||
|                 IGBPDequeueBufferResponseParcel response{result->first, *result->second}; | ||||
|                 ctx.WriteBuffer(response.Serialize()); | ||||
|             } else { | ||||
|                 // Put the current thread to sleep until a buffer becomes available | ||||
|                 ctx.SleepClientThread( | ||||
|                     "IHOSBinderDriver::DequeueBuffer", UINT64_MAX, | ||||
|                     [=, this](std::shared_ptr<Kernel::Thread> thread, | ||||
|                               Kernel::HLERequestContext& ctx, Kernel::ThreadWakeupReason reason) { | ||||
|                         // Repeat TransactParcel DequeueBuffer when a buffer is available | ||||
|                         const auto guard = nv_flinger.Lock(); | ||||
|                         auto& buffer_queue = nv_flinger.FindBufferQueue(id); | ||||
|                         auto result = buffer_queue.DequeueBuffer(width, height); | ||||
|                         ASSERT_MSG(result != std::nullopt, "Could not dequeue buffer."); | ||||
|  | ||||
|                         IGBPDequeueBufferResponseParcel response{result->first, *result->second}; | ||||
|                         ctx.WriteBuffer(response.Serialize()); | ||||
|                         IPC::ResponseBuilder rb{ctx, 2}; | ||||
|                         rb.Push(RESULT_SUCCESS); | ||||
|                     }, | ||||
|                     buffer_queue.GetWritableBufferWaitEvent()); | ||||
|             } | ||||
|             break; | ||||
|         } | ||||
|         case TransactionId::RequestBuffer: { | ||||
|             IGBPRequestBufferRequestParcel request{ctx.ReadBuffer()}; | ||||
|  | ||||
|             auto& buffer = buffer_queue.RequestBuffer(request.slot); | ||||
|  | ||||
|             IGBPRequestBufferResponseParcel response{buffer}; | ||||
|             ctx.WriteBuffer(response.Serialize()); | ||||
|  | ||||
|             break; | ||||
|         } | ||||
|         case TransactionId::QueueBuffer: { | ||||
| @@ -669,7 +682,7 @@ private: | ||||
|  | ||||
|         LOG_WARNING(Service_VI, "(STUBBED) called id={}, unknown={:08X}", id, unknown); | ||||
|  | ||||
|         const auto& buffer_queue = *nv_flinger.FindBufferQueue(id); | ||||
|         const auto& buffer_queue = nv_flinger.FindBufferQueue(id); | ||||
|  | ||||
|         // TODO(Subv): Find out what this actually is. | ||||
|         IPC::ResponseBuilder rb{ctx, 2, 1}; | ||||
|   | ||||
| @@ -148,4 +148,9 @@ void RestoreGlobalState(bool is_powered_on) { | ||||
|     values.motion_enabled.SetGlobal(true); | ||||
| } | ||||
|  | ||||
| void Sanitize() { | ||||
|     values.use_asynchronous_gpu_emulation.SetValue( | ||||
|         values.use_asynchronous_gpu_emulation.GetValue() || values.use_multi_core.GetValue()); | ||||
| } | ||||
|  | ||||
| } // namespace Settings | ||||
|   | ||||
| @@ -257,4 +257,7 @@ void LogSettings(); | ||||
| // Restore the global state of all applicable settings in the Values struct | ||||
| void RestoreGlobalState(bool is_powered_on); | ||||
|  | ||||
| // Fixes settings that are known to cause issues with the emulator | ||||
| void Sanitize(); | ||||
|  | ||||
| } // namespace Settings | ||||
|   | ||||
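Sanitize encodes the rule that multicore CPU emulation requires asynchronous GPU emulation: it promotes use_asynchronous_gpu_emulation whenever use_multi_core is set. The commit does not show the call site, but the intended usage is presumably to run it after the configuration is loaded and before boot, roughly:

```cpp
// Hypothetical call site (not part of this commit): fix up settings before booting.
LoadConfiguration();     // placeholder for whatever populates Settings::values
Settings::Sanitize();    // enforce async GPU emulation when multicore is enabled
Settings::LogSettings();
```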
| @@ -225,6 +225,11 @@ void Client::OnPortInfo([[maybe_unused]] Response::PortInfo data) { | ||||
| } | ||||
|  | ||||
| void Client::OnPadData(Response::PadData data, std::size_t client) { | ||||
|     // Accept packets only for the correct pad | ||||
|     if (static_cast<u8>(clients[client].pad_index) != data.info.id) { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     LOG_TRACE(Input, "PadData packet received"); | ||||
|     if (data.packet_counter == clients[client].packet_sequence) { | ||||
|         LOG_WARNING( | ||||
|   | ||||
| @@ -48,7 +48,6 @@ add_library(video_core STATIC | ||||
|     engines/shader_bytecode.h | ||||
|     engines/shader_header.h | ||||
|     engines/shader_type.h | ||||
|     framebuffer_config.h | ||||
|     macro/macro.cpp | ||||
|     macro/macro.h | ||||
|     macro/macro_hle.cpp | ||||
| @@ -60,6 +59,10 @@ add_library(video_core STATIC | ||||
|     fence_manager.h | ||||
|     gpu.cpp | ||||
|     gpu.h | ||||
|     gpu_asynch.cpp | ||||
|     gpu_asynch.h | ||||
|     gpu_synch.cpp | ||||
|     gpu_synch.h | ||||
|     gpu_thread.cpp | ||||
|     gpu_thread.h | ||||
|     guest_driver.cpp | ||||
|   | ||||
| @@ -10,7 +10,6 @@ | ||||
| #include "core/core_timing.h" | ||||
| #include "core/core_timing_util.h" | ||||
| #include "core/frontend/emu_window.h" | ||||
| #include "core/hardware_interrupt_manager.h" | ||||
| #include "core/memory.h" | ||||
| #include "core/settings.h" | ||||
| #include "video_core/engines/fermi_2d.h" | ||||
| @@ -37,8 +36,7 @@ GPU::GPU(Core::System& system_, bool is_async_, bool use_nvdec_) | ||||
|       kepler_compute{std::make_unique<Engines::KeplerCompute>(system, *memory_manager)}, | ||||
|       maxwell_dma{std::make_unique<Engines::MaxwellDMA>(system, *memory_manager)}, | ||||
|       kepler_memory{std::make_unique<Engines::KeplerMemory>(system, *memory_manager)}, | ||||
|       shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_}, | ||||
|       gpu_thread{system_, is_async_} {} | ||||
|       shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_} {} | ||||
|  | ||||
| GPU::~GPU() = default; | ||||
|  | ||||
| @@ -200,6 +198,10 @@ void GPU::SyncGuestHost() { | ||||
|     renderer->Rasterizer().SyncGuestHost(); | ||||
| } | ||||
|  | ||||
| void GPU::OnCommandListEnd() { | ||||
|     renderer->Rasterizer().ReleaseFences(); | ||||
| } | ||||
|  | ||||
| enum class GpuSemaphoreOperation { | ||||
|     AcquireEqual = 0x1, | ||||
|     WriteLong = 0x2, | ||||
| @@ -459,75 +461,4 @@ void GPU::ProcessSemaphoreAcquire() { | ||||
|     } | ||||
| } | ||||
|  | ||||
| void GPU::Start() { | ||||
|     gpu_thread.StartThread(*renderer, renderer->Context(), *dma_pusher, *cdma_pusher); | ||||
|     cpu_context = renderer->GetRenderWindow().CreateSharedContext(); | ||||
|     cpu_context->MakeCurrent(); | ||||
| } | ||||
|  | ||||
| void GPU::ObtainContext() { | ||||
|     cpu_context->MakeCurrent(); | ||||
| } | ||||
|  | ||||
| void GPU::ReleaseContext() { | ||||
|     cpu_context->DoneCurrent(); | ||||
| } | ||||
|  | ||||
| void GPU::PushGPUEntries(Tegra::CommandList&& entries) { | ||||
|     gpu_thread.SubmitList(std::move(entries)); | ||||
| } | ||||
|  | ||||
| void GPU::PushCommandBuffer(Tegra::ChCommandHeaderList& entries) { | ||||
|     if (!use_nvdec) { | ||||
|         return; | ||||
|     } | ||||
|     // This condition fires when a video stream ends; clear all intermediary data | ||||
|     if (entries[0].raw == 0xDEADB33F) { | ||||
|         cdma_pusher.reset(); | ||||
|         return; | ||||
|     } | ||||
|     if (!cdma_pusher) { | ||||
|         cdma_pusher = std::make_unique<Tegra::CDmaPusher>(*this); | ||||
|     } | ||||
|  | ||||
|     // SubmitCommandBuffer would make the nvdec operations async, but this is not currently working | ||||
|     // TODO(ameerj): RE proper async nvdec operation | ||||
|     // gpu_thread.SubmitCommandBuffer(std::move(entries)); | ||||
|  | ||||
|     cdma_pusher->Push(std::move(entries)); | ||||
|     cdma_pusher->DispatchCalls(); | ||||
| } | ||||
|  | ||||
| void GPU::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { | ||||
|     gpu_thread.SwapBuffers(framebuffer); | ||||
| } | ||||
|  | ||||
| void GPU::FlushRegion(VAddr addr, u64 size) { | ||||
|     gpu_thread.FlushRegion(addr, size); | ||||
| } | ||||
|  | ||||
| void GPU::InvalidateRegion(VAddr addr, u64 size) { | ||||
|     gpu_thread.InvalidateRegion(addr, size); | ||||
| } | ||||
|  | ||||
| void GPU::FlushAndInvalidateRegion(VAddr addr, u64 size) { | ||||
|     gpu_thread.FlushAndInvalidateRegion(addr, size); | ||||
| } | ||||
|  | ||||
| void GPU::TriggerCpuInterrupt(const u32 syncpoint_id, const u32 value) const { | ||||
|     auto& interrupt_manager = system.InterruptManager(); | ||||
|     interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value); | ||||
| } | ||||
|  | ||||
| void GPU::WaitIdle() const { | ||||
|     gpu_thread.WaitIdle(); | ||||
| } | ||||
|  | ||||
| void GPU::OnCommandListEnd() { | ||||
|     if (is_async) { | ||||
|         // This command only applies to asynchronous GPU mode | ||||
|         gpu_thread.OnCommandListEnd(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| } // namespace Tegra | ||||
|   | ||||
| @@ -15,8 +15,6 @@ | ||||
| #include "core/hle/service/nvflinger/buffer_queue.h" | ||||
| #include "video_core/cdma_pusher.h" | ||||
| #include "video_core/dma_pusher.h" | ||||
| #include "video_core/framebuffer_config.h" | ||||
| #include "video_core/gpu_thread.h" | ||||
|  | ||||
| using CacheAddr = std::uintptr_t; | ||||
| [[nodiscard]] inline CacheAddr ToCacheAddr(const void* host_ptr) { | ||||
| @@ -103,6 +101,28 @@ enum class DepthFormat : u32 { | ||||
| struct CommandListHeader; | ||||
| class DebugContext; | ||||
|  | ||||
| /** | ||||
|  * Struct describing framebuffer configuration | ||||
|  */ | ||||
| struct FramebufferConfig { | ||||
|     enum class PixelFormat : u32 { | ||||
|         A8B8G8R8_UNORM = 1, | ||||
|         RGB565_UNORM = 4, | ||||
|         B8G8R8A8_UNORM = 5, | ||||
|     }; | ||||
|  | ||||
|     VAddr address; | ||||
|     u32 offset; | ||||
|     u32 width; | ||||
|     u32 height; | ||||
|     u32 stride; | ||||
|     PixelFormat pixel_format; | ||||
|  | ||||
|     using TransformFlags = Service::NVFlinger::BufferQueue::BufferTransformFlags; | ||||
|     TransformFlags transform_flags; | ||||
|     Common::Rectangle<int> crop_rect; | ||||
| }; | ||||
|  | ||||
| namespace Engines { | ||||
| class Fermi2D; | ||||
| class Maxwell3D; | ||||
| @@ -121,7 +141,7 @@ enum class EngineID { | ||||
|  | ||||
| class MemoryManager; | ||||
|  | ||||
| class GPU final { | ||||
| class GPU { | ||||
| public: | ||||
|     struct MethodCall { | ||||
|         u32 method{}; | ||||
| @@ -139,7 +159,7 @@ public: | ||||
|     }; | ||||
|  | ||||
|     explicit GPU(Core::System& system_, bool is_async_, bool use_nvdec_); | ||||
|     ~GPU(); | ||||
|     virtual ~GPU(); | ||||
|  | ||||
|     /// Binds a renderer to the GPU. | ||||
|     void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer); | ||||
| @@ -156,7 +176,7 @@ public: | ||||
|     /// Synchronizes CPU writes with Host GPU memory. | ||||
|     void SyncGuestHost(); | ||||
|     /// Signal the ending of command list. | ||||
|     void OnCommandListEnd(); | ||||
|     virtual void OnCommandListEnd(); | ||||
|  | ||||
|     /// Request a host GPU memory flush from the CPU. | ||||
|     [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size); | ||||
| @@ -220,7 +240,7 @@ public: | ||||
|     } | ||||
|  | ||||
|     // Waits for the GPU to finish working | ||||
|     void WaitIdle() const; | ||||
|     virtual void WaitIdle() const = 0; | ||||
|  | ||||
|     /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame. | ||||
|     void WaitFence(u32 syncpoint_id, u32 value); | ||||
| @@ -310,34 +330,34 @@ public: | ||||
|     /// Performs any additional setup necessary in order to begin GPU emulation. | ||||
|     /// This can be used to launch any necessary threads and register any necessary | ||||
|     /// core timing events. | ||||
|     void Start(); | ||||
|     virtual void Start() = 0; | ||||
|  | ||||
|     /// Obtain the CPU Context | ||||
|     void ObtainContext(); | ||||
|     virtual void ObtainContext() = 0; | ||||
|  | ||||
|     /// Release the CPU Context | ||||
|     void ReleaseContext(); | ||||
|     virtual void ReleaseContext() = 0; | ||||
|  | ||||
|     /// Push GPU command entries to be processed | ||||
|     void PushGPUEntries(Tegra::CommandList&& entries); | ||||
|     virtual void PushGPUEntries(Tegra::CommandList&& entries) = 0; | ||||
|  | ||||
|     /// Push GPU command buffer entries to be processed | ||||
|     void PushCommandBuffer(Tegra::ChCommandHeaderList& entries); | ||||
|     virtual void PushCommandBuffer(Tegra::ChCommandHeaderList& entries) = 0; | ||||
|  | ||||
|     /// Swap buffers (render frame) | ||||
|     void SwapBuffers(const Tegra::FramebufferConfig* framebuffer); | ||||
|     virtual void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) = 0; | ||||
|  | ||||
|     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory | ||||
|     void FlushRegion(VAddr addr, u64 size); | ||||
|     virtual void FlushRegion(VAddr addr, u64 size) = 0; | ||||
|  | ||||
|     /// Notify rasterizer that any caches of the specified region should be invalidated | ||||
|     void InvalidateRegion(VAddr addr, u64 size); | ||||
|     virtual void InvalidateRegion(VAddr addr, u64 size) = 0; | ||||
|  | ||||
|     /// Notify rasterizer that any caches of the specified region should be flushed and invalidated | ||||
|     void FlushAndInvalidateRegion(VAddr addr, u64 size); | ||||
|     virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0; | ||||
|  | ||||
| protected: | ||||
|     void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const; | ||||
|     virtual void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const = 0; | ||||
|  | ||||
| private: | ||||
|     void ProcessBindMethod(const MethodCall& method_call); | ||||
| @@ -407,9 +427,6 @@ private: | ||||
|     std::mutex flush_request_mutex; | ||||
|  | ||||
|     const bool is_async; | ||||
|  | ||||
|     VideoCommon::GPUThread::ThreadManager gpu_thread; | ||||
|     std::unique_ptr<Core::Frontend::GraphicsContext> cpu_context; | ||||
| }; | ||||
|  | ||||
| #define ASSERT_REG_POSITION(field_name, position)                                                  \ | ||||
|   | ||||
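With GPU turned back into an abstract base class (Start, PushGPUEntries, SwapBuffers, and the flush/invalidate hooks are pure virtual again), the threading model is chosen by instantiating either GPUAsynch or GPUSynch rather than through a ThreadManager member inside GPU. A hedged sketch of the selection logic this implies, assuming GPUSynch mirrors the GPUAsynch constructor; the actual factory is not part of this excerpt:

```cpp
// Sketch only: backend selection based on the async-GPU setting.
std::unique_ptr<Tegra::GPU> CreateGPU(Core::System& system, bool use_nvdec) {
    if (Settings::values.use_asynchronous_gpu_emulation.GetValue()) {
        return std::make_unique<VideoCommon::GPUAsynch>(system, use_nvdec);
    }
    return std::make_unique<VideoCommon::GPUSynch>(system, use_nvdec);
}
```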
							
								
								
									
src/video_core/gpu_asynch.cpp (new executable file, 86 lines added)
							| @@ -0,0 +1,86 @@ | ||||
| // Copyright 2019 yuzu Emulator Project | ||||
| // Licensed under GPLv2 or any later version | ||||
| // Refer to the license.txt file included. | ||||
|  | ||||
| #include "core/core.h" | ||||
| #include "core/hardware_interrupt_manager.h" | ||||
| #include "video_core/gpu_asynch.h" | ||||
| #include "video_core/gpu_thread.h" | ||||
| #include "video_core/renderer_base.h" | ||||
|  | ||||
| namespace VideoCommon { | ||||
|  | ||||
| GPUAsynch::GPUAsynch(Core::System& system_, bool use_nvdec_) | ||||
|     : GPU{system_, true, use_nvdec_}, gpu_thread{system_} {} | ||||
|  | ||||
| GPUAsynch::~GPUAsynch() = default; | ||||
|  | ||||
| void GPUAsynch::Start() { | ||||
|     gpu_thread.StartThread(*renderer, renderer->Context(), *dma_pusher, *cdma_pusher); | ||||
|     cpu_context = renderer->GetRenderWindow().CreateSharedContext(); | ||||
|     cpu_context->MakeCurrent(); | ||||
| } | ||||
|  | ||||
| void GPUAsynch::ObtainContext() { | ||||
|     cpu_context->MakeCurrent(); | ||||
| } | ||||
|  | ||||
| void GPUAsynch::ReleaseContext() { | ||||
|     cpu_context->DoneCurrent(); | ||||
| } | ||||
|  | ||||
| void GPUAsynch::PushGPUEntries(Tegra::CommandList&& entries) { | ||||
|     gpu_thread.SubmitList(std::move(entries)); | ||||
| } | ||||
|  | ||||
| void GPUAsynch::PushCommandBuffer(Tegra::ChCommandHeaderList& entries) { | ||||
|     if (!use_nvdec) { | ||||
|         return; | ||||
|     } | ||||
|     // This condition fires when a video stream ends; clear all intermediary data | ||||
|     if (entries[0].raw == 0xDEADB33F) { | ||||
|         cdma_pusher.reset(); | ||||
|         return; | ||||
|     } | ||||
|     if (!cdma_pusher) { | ||||
|         cdma_pusher = std::make_unique<Tegra::CDmaPusher>(*this); | ||||
|     } | ||||
|  | ||||
|     // SubmitCommandBuffer would make the nvdec operations async, but this is not currently working | ||||
|     // TODO(ameerj): RE proper async nvdec operation | ||||
|     // gpu_thread.SubmitCommandBuffer(std::move(entries)); | ||||
|  | ||||
|     cdma_pusher->Push(std::move(entries)); | ||||
|     cdma_pusher->DispatchCalls(); | ||||
| } | ||||
|  | ||||
| void GPUAsynch::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { | ||||
|     gpu_thread.SwapBuffers(framebuffer); | ||||
| } | ||||
|  | ||||
| void GPUAsynch::FlushRegion(VAddr addr, u64 size) { | ||||
|     gpu_thread.FlushRegion(addr, size); | ||||
| } | ||||
|  | ||||
| void GPUAsynch::InvalidateRegion(VAddr addr, u64 size) { | ||||
|     gpu_thread.InvalidateRegion(addr, size); | ||||
| } | ||||
|  | ||||
| void GPUAsynch::FlushAndInvalidateRegion(VAddr addr, u64 size) { | ||||
|     gpu_thread.FlushAndInvalidateRegion(addr, size); | ||||
| } | ||||
|  | ||||
| void GPUAsynch::TriggerCpuInterrupt(const u32 syncpoint_id, const u32 value) const { | ||||
|     auto& interrupt_manager = system.InterruptManager(); | ||||
|     interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value); | ||||
| } | ||||
|  | ||||
| void GPUAsynch::WaitIdle() const { | ||||
|     gpu_thread.WaitIdle(); | ||||
| } | ||||
|  | ||||
| void GPUAsynch::OnCommandListEnd() { | ||||
|     gpu_thread.OnCommandListEnd(); | ||||
| } | ||||
|  | ||||
| } // namespace VideoCommon | ||||
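PushCommandBuffer above (and its synchronous counterpart later in this commit) handles nvdec submissions with one small pattern: the CDmaPusher is constructed lazily on the first real command buffer and torn down when the 0xDEADB33F end-of-stream sentinel arrives, so the next video stream starts from a clean state. A minimal standalone sketch of that pattern, using an illustrative Decoder type rather than the yuzu classes:

    #include <cstdint>
    #include <memory>
    #include <vector>

    struct Decoder {
        void Process(const std::vector<std::uint32_t>&) { /* decode the submitted buffer */ }
    };

    constexpr std::uint32_t kEndOfStream = 0xDEADB33F; // sentinel value used in this diff

    void PushBuffer(std::unique_ptr<Decoder>& decoder, const std::vector<std::uint32_t>& entries) {
        if (entries.empty()) {
            return;
        }
        if (entries.front() == kEndOfStream) {
            decoder.reset(); // a stream just ended: drop all intermediary decoding state
            return;
        }
        if (!decoder) {
            decoder = std::make_unique<Decoder>(); // created lazily on first use
        }
        decoder->Process(entries);
    }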
							
								
								
									
src/video_core/gpu_asynch.h (new executable file, 47 lines)
							| @@ -0,0 +1,47 @@ | ||||
| // Copyright 2019 yuzu Emulator Project | ||||
| // Licensed under GPLv2 or any later version | ||||
| // Refer to the license.txt file included. | ||||
|  | ||||
| #pragma once | ||||
|  | ||||
| #include "video_core/gpu.h" | ||||
| #include "video_core/gpu_thread.h" | ||||
|  | ||||
| namespace Core::Frontend { | ||||
| class GraphicsContext; | ||||
| } | ||||
|  | ||||
| namespace VideoCore { | ||||
| class RendererBase; | ||||
| } // namespace VideoCore | ||||
|  | ||||
| namespace VideoCommon { | ||||
|  | ||||
| /// Implementation of GPU interface that runs the GPU asynchronously | ||||
| class GPUAsynch final : public Tegra::GPU { | ||||
| public: | ||||
|     explicit GPUAsynch(Core::System& system_, bool use_nvdec_); | ||||
|     ~GPUAsynch() override; | ||||
|  | ||||
|     void Start() override; | ||||
|     void ObtainContext() override; | ||||
|     void ReleaseContext() override; | ||||
|     void PushGPUEntries(Tegra::CommandList&& entries) override; | ||||
|     void PushCommandBuffer(Tegra::ChCommandHeaderList& entries) override; | ||||
|     void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override; | ||||
|     void FlushRegion(VAddr addr, u64 size) override; | ||||
|     void InvalidateRegion(VAddr addr, u64 size) override; | ||||
|     void FlushAndInvalidateRegion(VAddr addr, u64 size) override; | ||||
|     void WaitIdle() const override; | ||||
|  | ||||
|     void OnCommandListEnd() override; | ||||
|  | ||||
| protected: | ||||
|     void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const override; | ||||
|  | ||||
| private: | ||||
|     GPUThread::ThreadManager gpu_thread; | ||||
|     std::unique_ptr<Core::Frontend::GraphicsContext> cpu_context; | ||||
| }; | ||||
|  | ||||
| } // namespace VideoCommon | ||||
							
								
								
									
src/video_core/gpu_synch.cpp (new executable file, 61 lines)
							| @@ -0,0 +1,61 @@ | ||||
| // Copyright 2019 yuzu Emulator Project | ||||
| // Licensed under GPLv2 or any later version | ||||
| // Refer to the license.txt file included. | ||||
|  | ||||
| #include "video_core/gpu_synch.h" | ||||
| #include "video_core/renderer_base.h" | ||||
|  | ||||
| namespace VideoCommon { | ||||
|  | ||||
| GPUSynch::GPUSynch(Core::System& system_, bool use_nvdec_) : GPU{system_, false, use_nvdec_} {} | ||||
|  | ||||
| GPUSynch::~GPUSynch() = default; | ||||
|  | ||||
| void GPUSynch::Start() {} | ||||
|  | ||||
| void GPUSynch::ObtainContext() { | ||||
|     renderer->Context().MakeCurrent(); | ||||
| } | ||||
|  | ||||
| void GPUSynch::ReleaseContext() { | ||||
|     renderer->Context().DoneCurrent(); | ||||
| } | ||||
|  | ||||
| void GPUSynch::PushGPUEntries(Tegra::CommandList&& entries) { | ||||
|     dma_pusher->Push(std::move(entries)); | ||||
|     dma_pusher->DispatchCalls(); | ||||
| } | ||||
|  | ||||
| void GPUSynch::PushCommandBuffer(Tegra::ChCommandHeaderList& entries) { | ||||
|     if (!use_nvdec) { | ||||
|         return; | ||||
|     } | ||||
|     // This condition fires when a video stream ends; clear all intermediary data | ||||
|     if (entries[0].raw == 0xDEADB33F) { | ||||
|         cdma_pusher.reset(); | ||||
|         return; | ||||
|     } | ||||
|     if (!cdma_pusher) { | ||||
|         cdma_pusher = std::make_unique<Tegra::CDmaPusher>(*this); | ||||
|     } | ||||
|     cdma_pusher->Push(std::move(entries)); | ||||
|     cdma_pusher->DispatchCalls(); | ||||
| } | ||||
|  | ||||
| void GPUSynch::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { | ||||
|     renderer->SwapBuffers(framebuffer); | ||||
| } | ||||
|  | ||||
| void GPUSynch::FlushRegion(VAddr addr, u64 size) { | ||||
|     renderer->Rasterizer().FlushRegion(addr, size); | ||||
| } | ||||
|  | ||||
| void GPUSynch::InvalidateRegion(VAddr addr, u64 size) { | ||||
|     renderer->Rasterizer().InvalidateRegion(addr, size); | ||||
| } | ||||
|  | ||||
| void GPUSynch::FlushAndInvalidateRegion(VAddr addr, u64 size) { | ||||
|     renderer->Rasterizer().FlushAndInvalidateRegion(addr, size); | ||||
| } | ||||
|  | ||||
| } // namespace VideoCommon | ||||
							
								
								
									
src/video_core/gpu_synch.h (new executable file, 41 lines)
							| @@ -0,0 +1,41 @@ | ||||
| // Copyright 2019 yuzu Emulator Project | ||||
| // Licensed under GPLv2 or any later version | ||||
| // Refer to the license.txt file included. | ||||
|  | ||||
| #pragma once | ||||
|  | ||||
| #include "video_core/gpu.h" | ||||
|  | ||||
| namespace Core::Frontend { | ||||
| class GraphicsContext; | ||||
| } | ||||
|  | ||||
| namespace VideoCore { | ||||
| class RendererBase; | ||||
| } // namespace VideoCore | ||||
|  | ||||
| namespace VideoCommon { | ||||
|  | ||||
| /// Implementation of GPU interface that runs the GPU synchronously | ||||
| class GPUSynch final : public Tegra::GPU { | ||||
| public: | ||||
|     explicit GPUSynch(Core::System& system_, bool use_nvdec_); | ||||
|     ~GPUSynch() override; | ||||
|  | ||||
|     void Start() override; | ||||
|     void ObtainContext() override; | ||||
|     void ReleaseContext() override; | ||||
|     void PushGPUEntries(Tegra::CommandList&& entries) override; | ||||
|     void PushCommandBuffer(Tegra::ChCommandHeaderList& entries) override; | ||||
|     void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override; | ||||
|     void FlushRegion(VAddr addr, u64 size) override; | ||||
|     void InvalidateRegion(VAddr addr, u64 size) override; | ||||
|     void FlushAndInvalidateRegion(VAddr addr, u64 size) override; | ||||
|     void WaitIdle() const override {} | ||||
|  | ||||
| protected: | ||||
|     void TriggerCpuInterrupt([[maybe_unused]] u32 syncpoint_id, | ||||
|                              [[maybe_unused]] u32 value) const override {} | ||||
| }; | ||||
|  | ||||
| } // namespace VideoCommon | ||||
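GPUAsynch and GPUSynch expose the same Tegra::GPU surface, so the subsystems that submit work (command lists, flushes, buffer swaps) never branch on the emulation mode themselves; only CreateGPU later in this commit picks the concrete class. A standalone sketch of that dispatch-through-a-common-interface arrangement, with hypothetical backend names rather than the real yuzu types:

    #include <memory>
    #include <utility>
    #include <vector>

    struct CommandList {
        std::vector<int> entries; // stand-in for Tegra::CommandList
    };

    class GpuBackend {
    public:
        virtual ~GpuBackend() = default;
        virtual void PushEntries(CommandList&& list) = 0;
    };

    class SyncBackend final : public GpuBackend {
    public:
        void PushEntries(CommandList&& list) override {
            Execute(list); // synchronous mode: dispatch immediately on the calling thread
        }

    private:
        static void Execute(const CommandList&) { /* process the commands inline */ }
    };

    class AsyncBackend final : public GpuBackend {
    public:
        void PushEntries(CommandList&& list) override {
            pending.push_back(std::move(list)); // asynchronous mode: queue for a worker thread
        }

    private:
        std::vector<CommandList> pending;
    };

    // Callers are written once against the interface and work unchanged with either backend.
    void Submit(GpuBackend& gpu, CommandList list) {
        gpu.PushEntries(std::move(list));
    }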
| @@ -4,7 +4,6 @@ | ||||
|  | ||||
| #include "common/assert.h" | ||||
| #include "common/microprofile.h" | ||||
| #include "common/scope_exit.h" | ||||
| #include "common/thread.h" | ||||
| #include "core/core.h" | ||||
| #include "core/frontend/emu_window.h" | ||||
| @@ -22,8 +21,6 @@ static void RunThread(Core::System& system, VideoCore::RendererBase& renderer, | ||||
|                       SynchState& state, Tegra::CDmaPusher& cdma_pusher) { | ||||
|     std::string name = "yuzu:GPU"; | ||||
|     MicroProfileOnThreadCreate(name.c_str()); | ||||
|     SCOPE_EXIT({ MicroProfileOnThreadExit(); }); | ||||
|  | ||||
|     Common::SetCurrentThreadName(name.c_str()); | ||||
|     Common::SetCurrentThreadPriority(Common::ThreadPriority::High); | ||||
|     system.RegisterHostThread(); | ||||
| @@ -68,8 +65,7 @@ static void RunThread(Core::System& system, VideoCore::RendererBase& renderer, | ||||
|     } | ||||
| } | ||||
|  | ||||
| ThreadManager::ThreadManager(Core::System& system_, bool is_async_) | ||||
|     : system{system_}, is_async{is_async_} {} | ||||
| ThreadManager::ThreadManager(Core::System& system_) : system{system_} {} | ||||
|  | ||||
| ThreadManager::~ThreadManager() { | ||||
|     if (!thread.joinable()) { | ||||
| @@ -101,30 +97,19 @@ void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { | ||||
| } | ||||
|  | ||||
| void ThreadManager::FlushRegion(VAddr addr, u64 size) { | ||||
|     if (!is_async) { | ||||
|         // Always flush with synchronous GPU mode | ||||
|     if (!Settings::IsGPULevelHigh()) { | ||||
|         PushCommand(FlushRegionCommand(addr, size)); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     // Asynchronous GPU mode | ||||
|     switch (Settings::values.gpu_accuracy.GetValue()) { | ||||
|     case Settings::GPUAccuracy::Normal: | ||||
|         PushCommand(FlushRegionCommand(addr, size)); | ||||
|         break; | ||||
|     case Settings::GPUAccuracy::High: | ||||
|         // TODO(bunnei): Is this right? Preserving existing behavior for now | ||||
|         break; | ||||
|     case Settings::GPUAccuracy::Extreme: { | ||||
|     if (!Settings::IsGPULevelExtreme()) { | ||||
|         return; | ||||
|     } | ||||
|     if (system.Renderer().Rasterizer().MustFlushRegion(addr, size)) { | ||||
|         auto& gpu = system.GPU(); | ||||
|         u64 fence = gpu.RequestFlush(addr, size); | ||||
|         PushCommand(GPUTickCommand()); | ||||
|         while (fence > gpu.CurrentFlushRequestFence()) { | ||||
|         } | ||||
|         break; | ||||
|     } | ||||
|     default: | ||||
|         UNIMPLEMENTED_MSG("Unsupported gpu_accuracy {}", Settings::values.gpu_accuracy.GetValue()); | ||||
|     } | ||||
| } | ||||
|  | ||||
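After this hunk, FlushRegion keys off Settings::IsGPULevelHigh() and Settings::IsGPULevelExtreme() instead of the removed is_async flag and accuracy switch, but the effective behaviour is still a three-way split: Normal accuracy queues the flush, High skips it, and Extreme consults the rasterizer and then synchronises on a flush-request fence. A standalone sketch of that control flow, with a hypothetical FlushBackend interface standing in for the yuzu types:

    #include <cstdint>

    enum class GpuAccuracy { Normal, High, Extreme };

    struct FlushBackend {
        virtual ~FlushBackend() = default;
        virtual void QueueFlush(std::uint64_t addr, std::uint64_t size) = 0;      // enqueue on the GPU thread
        virtual bool MustFlush(std::uint64_t addr, std::uint64_t size) = 0;       // ask the rasterizer
        virtual void FlushNowAndWait(std::uint64_t addr, std::uint64_t size) = 0; // fence-synchronised flush
    };

    void FlushRegion(FlushBackend& backend, GpuAccuracy level, std::uint64_t addr, std::uint64_t size) {
        if (level == GpuAccuracy::Normal) {
            backend.QueueFlush(addr, size); // queue a FlushRegionCommand and return
            return;
        }
        if (level != GpuAccuracy::Extreme) {
            return; // High accuracy: skip the flush entirely
        }
        if (backend.MustFlush(addr, size)) {
            backend.FlushNowAndWait(addr, size); // request a flush, tick the GPU, spin on its fence
        }
    }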
| @@ -138,8 +123,7 @@ void ThreadManager::FlushAndInvalidateRegion(VAddr addr, u64 size) { | ||||
| } | ||||
|  | ||||
| void ThreadManager::WaitIdle() const { | ||||
|     while (state.last_fence > state.signaled_fence.load(std::memory_order_relaxed) && | ||||
|            system.IsPoweredOn()) { | ||||
|     while (state.last_fence > state.signaled_fence.load(std::memory_order_relaxed)) { | ||||
|     } | ||||
| } | ||||
|  | ||||
| @@ -150,12 +134,6 @@ void ThreadManager::OnCommandListEnd() { | ||||
| u64 ThreadManager::PushCommand(CommandData&& command_data) { | ||||
|     const u64 fence{++state.last_fence}; | ||||
|     state.queue.Push(CommandDataContainer(std::move(command_data), fence)); | ||||
|  | ||||
|     if (!is_async) { | ||||
|         // In synchronous GPU mode, block the caller until the command has executed | ||||
|         WaitIdle(); | ||||
|     } | ||||
|  | ||||
|     return fence; | ||||
| } | ||||
|  | ||||
|   | ||||
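The gpu_thread.cpp changes above also drop the synchronous-mode blocking from PushCommand and the IsPoweredOn() check from WaitIdle, leaving the bare fence hand-off between the submitting thread and the GPU worker. A standalone model of that bookkeeping (illustrative names, not the yuzu types):

    #include <atomic>
    #include <cstdint>

    struct FenceState {
        std::atomic<std::uint64_t> last_fence{0};     // highest fence handed out to submitters
        std::atomic<std::uint64_t> signaled_fence{0}; // highest fence the worker has completed
    };

    // Submitter side: every pushed command takes the next fence value (the real code also
    // enqueues the command itself; only the fence bookkeeping is shown here).
    std::uint64_t PushCommand(FenceState& state) {
        return ++state.last_fence;
    }

    // Worker side: publish the fence of the command that just finished executing.
    void SignalCommand(FenceState& state, std::uint64_t fence) {
        state.signaled_fence.store(fence, std::memory_order_release);
    }

    // Busy-wait until every handed-out fence has been signalled, mirroring ThreadManager::WaitIdle.
    void WaitIdle(const FenceState& state) {
        while (state.last_fence.load(std::memory_order_relaxed) >
               state.signaled_fence.load(std::memory_order_relaxed)) {
        }
    }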
| @@ -10,9 +10,8 @@ | ||||
| #include <optional> | ||||
| #include <thread> | ||||
| #include <variant> | ||||
|  | ||||
| #include "common/threadsafe_queue.h" | ||||
| #include "video_core/framebuffer_config.h" | ||||
| #include "video_core/gpu.h" | ||||
|  | ||||
| namespace Tegra { | ||||
| struct FramebufferConfig; | ||||
| @@ -26,10 +25,6 @@ class GraphicsContext; | ||||
| class System; | ||||
| } // namespace Core | ||||
|  | ||||
| namespace VideoCore { | ||||
| class RendererBase; | ||||
| } // namespace VideoCore | ||||
|  | ||||
| namespace VideoCommon::GPUThread { | ||||
|  | ||||
| /// Command to signal to the GPU thread that processing has ended | ||||
| @@ -117,7 +112,7 @@ struct SynchState final { | ||||
| /// Class used to manage the GPU thread | ||||
| class ThreadManager final { | ||||
| public: | ||||
|     explicit ThreadManager(Core::System& system_, bool is_async_); | ||||
|     explicit ThreadManager(Core::System& system_); | ||||
|     ~ThreadManager(); | ||||
|  | ||||
|     /// Creates and starts the GPU thread. | ||||
| @@ -155,7 +150,6 @@ private: | ||||
|     Core::System& system; | ||||
|     std::thread thread; | ||||
|     std::thread::id thread_id; | ||||
|     const bool is_async; | ||||
| }; | ||||
|  | ||||
| } // namespace VideoCommon::GPUThread | ||||
|   | ||||
| @@ -7,6 +7,8 @@ | ||||
| #include "common/logging/log.h" | ||||
| #include "core/core.h" | ||||
| #include "core/settings.h" | ||||
| #include "video_core/gpu_asynch.h" | ||||
| #include "video_core/gpu_synch.h" | ||||
| #include "video_core/renderer_base.h" | ||||
| #include "video_core/renderer_opengl/renderer_opengl.h" | ||||
| #include "video_core/renderer_vulkan/renderer_vulkan.h" | ||||
| @@ -37,9 +39,13 @@ std::unique_ptr<VideoCore::RendererBase> CreateRenderer( | ||||
| namespace VideoCore { | ||||
|  | ||||
| std::unique_ptr<Tegra::GPU> CreateGPU(Core::Frontend::EmuWindow& emu_window, Core::System& system) { | ||||
|     std::unique_ptr<Tegra::GPU> gpu; | ||||
|     const bool use_nvdec = Settings::values.use_nvdec_emulation.GetValue(); | ||||
|     std::unique_ptr<Tegra::GPU> gpu = std::make_unique<Tegra::GPU>( | ||||
|         system, Settings::values.use_asynchronous_gpu_emulation.GetValue(), use_nvdec); | ||||
|     if (Settings::values.use_asynchronous_gpu_emulation.GetValue()) { | ||||
|         gpu = std::make_unique<VideoCommon::GPUAsynch>(system, use_nvdec); | ||||
|     } else { | ||||
|         gpu = std::make_unique<VideoCommon::GPUSynch>(system, use_nvdec); | ||||
|     } | ||||
|  | ||||
|     auto context = emu_window.CreateSharedContext(); | ||||
|     const auto scope = context->Acquire(); | ||||
|   | ||||
| @@ -1589,12 +1589,14 @@ void Config::WriteSettingGlobal(const QString& name, const QVariant& value, bool | ||||
|  | ||||
| void Config::Reload() { | ||||
|     ReadValues(); | ||||
|     Settings::Sanitize(); | ||||
|     // To apply default value changes | ||||
|     SaveValues(); | ||||
|     Settings::Apply(Core::System::GetInstance()); | ||||
| } | ||||
|  | ||||
| void Config::Save() { | ||||
|     Settings::Sanitize(); | ||||
|     SaveValues(); | ||||
| } | ||||
|  | ||||
|   | ||||
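Config::Reload() and Config::Save() now call Settings::Sanitize() before values are persisted. Its body is not part of this diff, but the status-bar handlers below always keep asynchronous GPU emulation enabled whenever multicore is on, which suggests the invariant Sanitize() is meant to restore. A standalone model of that assumption (hypothetical SettingsModel type, not the actual yuzu implementation):

    // Assumption: multicore emulation requires the asynchronous GPU, so the async flag is
    // forced on whenever multicore is set.
    struct SettingsModel {
        bool use_multi_core = false;
        bool use_asynchronous_gpu_emulation = false;

        void Sanitize() {
            use_asynchronous_gpu_emulation = use_asynchronous_gpu_emulation || use_multi_core;
        }
    };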
| @@ -580,8 +580,9 @@ void GMainWindow::InitializeWidgets() { | ||||
|         if (emulation_running) { | ||||
|             return; | ||||
|         } | ||||
|         Settings::values.use_asynchronous_gpu_emulation.SetValue( | ||||
|             !Settings::values.use_asynchronous_gpu_emulation.GetValue()); | ||||
|         const bool is_async = !Settings::values.use_asynchronous_gpu_emulation.GetValue() || | ||||
|                               Settings::values.use_multi_core.GetValue(); | ||||
|         Settings::values.use_asynchronous_gpu_emulation.SetValue(is_async); | ||||
|         async_status_button->setChecked(Settings::values.use_asynchronous_gpu_emulation.GetValue()); | ||||
|         Settings::Apply(Core::System::GetInstance()); | ||||
|     }); | ||||
| @@ -598,13 +599,16 @@ void GMainWindow::InitializeWidgets() { | ||||
|             return; | ||||
|         } | ||||
|         Settings::values.use_multi_core.SetValue(!Settings::values.use_multi_core.GetValue()); | ||||
|         const bool is_async = Settings::values.use_asynchronous_gpu_emulation.GetValue() || | ||||
|                               Settings::values.use_multi_core.GetValue(); | ||||
|         Settings::values.use_asynchronous_gpu_emulation.SetValue(is_async); | ||||
|         async_status_button->setChecked(Settings::values.use_asynchronous_gpu_emulation.GetValue()); | ||||
|         multicore_status_button->setChecked(Settings::values.use_multi_core.GetValue()); | ||||
|         Settings::Apply(Core::System::GetInstance()); | ||||
|     }); | ||||
|     multicore_status_button->setText(tr("MULTICORE")); | ||||
|     multicore_status_button->setCheckable(true); | ||||
|     multicore_status_button->setChecked(Settings::values.use_multi_core.GetValue()); | ||||
|  | ||||
|     statusBar()->insertPermanentWidget(0, multicore_status_button); | ||||
|     statusBar()->insertPermanentWidget(0, async_status_button); | ||||
|  | ||||
| @@ -2529,6 +2533,9 @@ void GMainWindow::UpdateStatusBar() { | ||||
| void GMainWindow::UpdateStatusButtons() { | ||||
|     dock_status_button->setChecked(Settings::values.use_docked_mode.GetValue()); | ||||
|     multicore_status_button->setChecked(Settings::values.use_multi_core.GetValue()); | ||||
|     Settings::values.use_asynchronous_gpu_emulation.SetValue( | ||||
|         Settings::values.use_asynchronous_gpu_emulation.GetValue() || | ||||
|         Settings::values.use_multi_core.GetValue()); | ||||
|     async_status_button->setChecked(Settings::values.use_asynchronous_gpu_emulation.GetValue()); | ||||
|     renderer_status_button->setChecked(Settings::values.renderer_backend.GetValue() == | ||||
|                                        Settings::RendererBackend::Vulkan); | ||||
|   | ||||