early-access version 1952
@@ -114,10 +114,17 @@ void GPU::WaitFence(u32 syncpoint_id, u32 value) {
    });
}

void GPU::IncrementSyncPointGuest(const u32 syncpoint_id) {
    std::lock_guard lock{pre_sync_mutex};
    auto& syncpoint = pre_syncpoints.at(syncpoint_id);
    syncpoint++;
    ProcessFrameRequests(syncpoint_id, syncpoint);
}

void GPU::IncrementSyncPoint(const u32 syncpoint_id) {
    std::lock_guard lock{sync_mutex};
    auto& syncpoint = syncpoints.at(syncpoint_id);
    syncpoint++;
    std::lock_guard lock{sync_mutex};
    sync_cv.notify_all();
    auto& interrupt = syncpt_interrupts.at(syncpoint_id);
    if (!interrupt.empty()) {
@@ -162,25 +169,121 @@ bool GPU::CancelSyncptInterrupt(const u32 syncpoint_id, const u32 value) {
    return true;
}

void GPU::WaitOnWorkRequest(u64 fence) {
    std::unique_lock lck{work_request_mutex};
    request_cv.wait(lck,
                    [&] { return fence >= current_request_fence.load(std::memory_order_relaxed); });
}

u64 GPU::RequestFlush(VAddr addr, std::size_t size) {
    std::unique_lock lck{flush_request_mutex};
    const u64 fence = ++last_flush_fence;
    flush_requests.emplace_back(fence, addr, size);
    std::unique_lock lck{work_request_mutex};
    const u64 fence = ++last_request_fence;
    work_requests.emplace_back(fence, addr, size);
    return fence;
}

u64 GPU::RequestQueueFrame(u64 id) {
    std::unique_lock lck{work_request_mutex};
    const u64 fence = ++last_request_fence;
    work_requests.emplace_back(fence, id);
    return fence;
}

void GPU::TickWork() {
    std::unique_lock lck{flush_request_mutex};
    while (!flush_requests.empty()) {
        auto& request = flush_requests.front();
    std::unique_lock lck{work_request_mutex};
    while (!work_requests.empty()) {
        auto request = work_requests.front();
        const u64 fence = request.fence;
        const VAddr addr = request.addr;
        const std::size_t size = request.size;
        flush_requests.pop_front();
        flush_request_mutex.unlock();
        rasterizer->FlushRegion(addr, size);
        current_flush_fence.store(fence);
        flush_request_mutex.lock();
        work_requests.pop_front();
        work_request_mutex.unlock();
        switch (request.type) {
        case RequestType::Flush: {
            rasterizer->FlushRegion(request.flush.addr, request.flush.size);
            break;
        }
        case RequestType::QueueFrame: {
            Tegra::FramebufferConfig frame_info;
            {
                std::unique_lock<std::mutex> lock(frame_requests_mutex);
                const u64 searching_id = request.queue_frame.id;
                auto it = std::find_if(
                    frame_queue_items.begin(), frame_queue_items.end(),
                    [searching_id](const FrameQueue& item) { return item.id == searching_id; });
                ASSERT(it != frame_queue_items.end());
                frame_info = it->frame_info;
                frame_queue_items.erase(it);
            }
            renderer->SwapBuffers(&frame_info);
            break;
        }
        default: {
            LOG_ERROR(HW_GPU, "Unknown work request type={}", request.type);
        }
        }
        current_request_fence.store(fence, std::memory_order_release);
        work_request_mutex.lock();
        request_cv.notify_all();
    }
}

void GPU::QueueFrame(const Tegra::FramebufferConfig* framebuffer,
                     const Service::Nvidia::MultiFence& fences) {
    std::unique_lock<std::mutex> lock(frame_requests_mutex);
    if (fences.num_fences == 0) {
        u64 new_queue_id = frame_queue_ids++;
        FrameQueue item{
            .frame_info = *framebuffer,
            .id = new_queue_id,
        };
        frame_queue_items.push_back(item);
        RequestQueueFrame(new_queue_id);
        return;
    }
    u64 new_id = frame_request_ids++;
    FrameRequest request{
        .frame_info = *framebuffer,
        .count = 0,
        .id = new_id,
    };
    std::unique_lock lck{pre_sync_mutex};
    for (size_t i = 0; i < fences.num_fences; i++) {
        auto& fence = fences.fences[i];
        if (pre_syncpoints[fence.id].load(std::memory_order_relaxed) < fence.value) {
            const FrameTrigger trigger{
                .id = new_id,
                .sync_point_value = fence.value,
            };
            frame_triggers[fence.id].push_back(trigger);
            ++request.count;
        }
    }
    if (request.count == 0) {
        lck.unlock();
        gpu_thread.SwapBuffers(framebuffer);
        return;
    }
    frame_requests.emplace(new_id, request);
}

void GPU::ProcessFrameRequests(u32 syncpoint_id, u32 new_value) {
    auto& list = frame_triggers[syncpoint_id];
    if (list.empty()) {
        return;
    }
    auto it = list.begin();
    while (it != list.end()) {
        if (it->sync_point_value <= new_value) {
            auto obj = frame_requests.find(it->id);
            --obj->second.count;
            if (obj->second.count == 0) {
                rasterizer->FlushCommands();
                renderer->SwapBuffers(&obj->second.frame_info);
                frame_requests.erase(obj);
            }
            it = list.erase(it);
            continue;
        }
        ++it;
    }
}

@@ -399,7 +502,7 @@ void GPU::ProcessFenceActionMethod() {
        WaitFence(regs.fence_action.syncpoint_id, regs.fence_value);
        break;
    case FenceOperation::Increment:
        IncrementSyncPoint(regs.fence_action.syncpoint_id);
        rasterizer->SignalSyncPoint(regs.fence_action.syncpoint_id);
        break;
    default:
        UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value());
@@ -159,11 +159,16 @@ public:
    void OnCommandListEnd();

    /// Request a host GPU memory flush from the CPU.
    [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size);
    u64 RequestFlush(VAddr addr, std::size_t size);

    void WaitOnWorkRequest(u64 fence);

    void QueueFrame(const Tegra::FramebufferConfig* framebuffer,
                    const Service::Nvidia::MultiFence& fence);

    /// Obtains current flush request fence id.
    [[nodiscard]] u64 CurrentFlushRequestFence() const {
        return current_flush_fence.load(std::memory_order_relaxed);
    [[nodiscard]] u64 CurrentWorkRequestFence() const {
        return current_request_fence.load(std::memory_order_relaxed);
    }

    /// Tick pending requests within the GPU.
@@ -225,6 +230,7 @@ public:
    /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
    void WaitFence(u32 syncpoint_id, u32 value);

    void IncrementSyncPointGuest(u32 syncpoint_id);
    void IncrementSyncPoint(u32 syncpoint_id);

    [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const;
@@ -365,6 +371,34 @@ private:
    /// Determines where the method should be executed.
    [[nodiscard]] bool ExecuteMethodOnEngine(u32 method);

    struct FrameRequest {
        Tegra::FramebufferConfig frame_info;
        size_t count;
        u64 id;
    };

    struct FrameTrigger {
        u64 id;
        u32 sync_point_value;
    };

    struct FrameQueue {
        Tegra::FramebufferConfig frame_info;
        u64 id;
    };

    /// Request a frame release on the GPU thread
    u64 RequestQueueFrame(u64 id);

    void ProcessFrameRequests(u32 syncpoint_id, u32 new_value);

    std::mutex frame_requests_mutex;
    std::unordered_map<u32, std::list<FrameTrigger>> frame_triggers;
    std::unordered_map<u64, FrameRequest> frame_requests;
    std::list<FrameQueue> frame_queue_items;
    u64 frame_queue_ids{};
    u64 frame_request_ids{};

protected:
    Core::System& system;
    std::unique_ptr<Tegra::MemoryManager> memory_manager;
@@ -392,27 +426,50 @@ private:
    /// When true, we are about to shut down emulation session, so terminate outstanding tasks
    std::atomic_bool shutting_down{};

    std::array<std::atomic<u32>, Service::Nvidia::MaxSyncPoints> pre_syncpoints{};
    std::array<std::atomic<u32>, Service::Nvidia::MaxSyncPoints> syncpoints{};

    std::array<std::list<u32>, Service::Nvidia::MaxSyncPoints> syncpt_interrupts;

    std::mutex pre_sync_mutex;
    std::mutex sync_mutex;
    std::mutex device_mutex;

    std::condition_variable sync_cv;

    struct FlushRequest {
        explicit FlushRequest(u64 fence_, VAddr addr_, std::size_t size_)
            : fence{fence_}, addr{addr_}, size{size_} {}
        u64 fence;
        VAddr addr;
        std::size_t size;
    enum class RequestType : u32 {
        Flush = 0,
        QueueFrame = 1,
    };

    std::list<FlushRequest> flush_requests;
    std::atomic<u64> current_flush_fence{};
    u64 last_flush_fence{};
    std::mutex flush_request_mutex;
    struct WorkRequest {
        explicit WorkRequest(u64 fence_, VAddr addr_, std::size_t size_)
            : fence{fence_}, type{RequestType::Flush} {
            flush.addr = addr_;
            flush.size = size_;
        }

        explicit WorkRequest(u64 fence_, u64 id) : fence{fence_}, type{RequestType::QueueFrame} {
            queue_frame.id = id;
        }
        u64 fence;
        union {
            struct {
                VAddr addr;
                std::size_t size;
            } flush;
            struct {
                u64 id;
            } queue_frame;
        };
        RequestType type;
    }; // namespace Tegra

    std::list<WorkRequest> work_requests;
    std::atomic<u64> current_request_fence{};
    u64 last_request_fence{};
    std::mutex work_request_mutex;
    std::condition_variable request_cv;

    const bool is_async;

@@ -105,7 +105,7 @@ void ThreadManager::FlushRegion(VAddr addr, u64 size) {
    auto& gpu = system.GPU();
    u64 fence = gpu.RequestFlush(addr, size);
    PushCommand(GPUTickCommand(), true);
    ASSERT(fence <= gpu.CurrentFlushRequestFence());
    ASSERT(fence <= gpu.CurrentWorkRequestFence());
}

void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
@@ -214,6 +214,8 @@ void RasterizerOpenGL::Clear() {
void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
    MICROPROFILE_SCOPE(OpenGL_Drawing);

    SCOPE_EXIT({ gpu.TickWork(); });

    query_cache.UpdateCounters();

    SyncState();
@@ -269,8 +271,6 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {

    ++num_queued_commands;
    has_written_global_memory |= pipeline->WritesGlobalMemory();

    gpu.TickWork();
}

void RasterizerOpenGL::DispatchCompute() {
@@ -421,6 +421,7 @@ void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) {
}

void RasterizerOpenGL::SignalSyncPoint(u32 value) {
    gpu.IncrementSyncPointGuest(value);
    if (!gpu.IsAsync()) {
        gpu.IncrementSyncPoint(value);
        return;
@@ -412,6 +412,7 @@ void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) {
}

void RasterizerVulkan::SignalSyncPoint(u32 value) {
    gpu.IncrementSyncPointGuest(value);
    if (!gpu.IsAsync()) {
        gpu.IncrementSyncPoint(value);
        return;