early-access version 2203
@@ -4,7 +4,6 @@
 
 #pragma once
 
-#include <atomic>
 #include "common/assert.h"
 #include "core/hle/kernel/k_spin_lock.h"
 #include "core/hle/kernel/k_thread.h"
@@ -20,7 +19,7 @@ public:
     explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}
 
     bool IsLockedByCurrentThread() const {
-        return owner_thread.load(std::memory_order::consume) == GetCurrentThreadPointer(kernel);
+        return owner_thread == GetCurrentThreadPointer(kernel);
     }
 
     void Lock() {
@@ -39,7 +38,7 @@ public:
 
             // Increment count, take ownership.
             lock_count = 1;
-            owner_thread.store(GetCurrentThreadPointer(kernel), std::memory_order::release);
+            owner_thread = GetCurrentThreadPointer(kernel);
         }
     }
 
@@ -54,7 +53,7 @@ public:
             SchedulerType::UpdateHighestPriorityThreads(kernel);
 
             // Note that we no longer hold the lock, and unlock the spinlock.
-            owner_thread.store(nullptr, std::memory_order::release);
+            owner_thread = nullptr;
             spin_lock.Unlock();
 
             // Enable scheduling, and perform a rescheduling operation.
@@ -66,7 +65,7 @@ private:
     KernelCore& kernel;
     KAlignedSpinLock spin_lock{};
     s32 lock_count{};
-    std::atomic<KThread*> owner_thread{};
+    KThread* owner_thread{};
 };
 
 } // namespace Kernel
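The hunks above show only the changed lines: the std::atomic<KThread*> owner field becomes a plain KThread* that is assigned directly, with ownership released before the spinlock is dropped. The standalone sketch below illustrates that recursive-ownership pattern in isolation. Every identifier in it (SimpleSchedulerLock, SimpleSpinLock, ThreadToken, GetCurrentToken) is a hypothetical stand-in invented for illustration, not yuzu's KernelCore/KThread/KAlignedSpinLock API, and the scheduling hooks are reduced to no-ops; treat it as a minimal sketch of the pattern, not the actual implementation.

// Minimal, self-contained sketch (hypothetical names, not yuzu's API) of the
// recursive lock pattern the diff converges on: a plain owner pointer that is
// only ever written while the spinlock is held.
#include <atomic>
#include <cassert>
#include <cstdio>

struct ThreadToken {};                       // stand-in for KThread
thread_local ThreadToken g_current_thread;   // stand-in for the current thread
ThreadToken* GetCurrentToken() {             // stand-in for GetCurrentThreadPointer()
    return &g_current_thread;
}

class SimpleSpinLock {                       // stand-in for KAlignedSpinLock
public:
    void Lock() {
        while (locked.exchange(true, std::memory_order_acquire)) {
        }
    }
    void Unlock() {
        locked.store(false, std::memory_order_release);
    }

private:
    std::atomic<bool> locked{false};
};

class SimpleSchedulerLock {
public:
    // After the change, the owner field is a plain pointer: the comparison can
    // only come out true on the thread that stored the pointer itself.
    bool IsLockedByCurrentThread() const {
        return owner_thread == GetCurrentToken();
    }

    void Lock() {
        if (IsLockedByCurrentThread()) {
            // Recursive acquire on the owning thread: just bump the count.
            lock_count++;
        } else {
            // First acquire: take the spinlock, then take ownership.
            spin_lock.Lock();
            lock_count = 1;
            owner_thread = GetCurrentToken();
        }
    }

    void Unlock() {
        assert(IsLockedByCurrentThread());
        if (--lock_count == 0) {
            // As in the diff: drop ownership, then release the spinlock.
            owner_thread = nullptr;
            spin_lock.Unlock();
        }
    }

private:
    SimpleSpinLock spin_lock{};
    int lock_count{};
    ThreadToken* owner_thread{};
};

int main() {
    SimpleSchedulerLock lock;
    lock.Lock();
    lock.Lock();  // nested acquire on the same thread
    assert(lock.IsLockedByCurrentThread());
    lock.Unlock();
    lock.Unlock();
    std::printf("nested lock/unlock completed\n");
    return 0;
}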