early-access version 2003

This commit is contained in:
pineappleEA
2021-08-20 02:44:09 +02:00
parent dde6e66bd1
commit 07a248f589
5 changed files with 166 additions and 216 deletions

@@ -4,242 +4,175 @@
#pragma once

// a simple lockless thread-safe,
// single reader, single writer queue

#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <utility>

namespace Common {
template <typename T>
class SPSCQueue {
public:
    SPSCQueue() {
        write_ptr = read_ptr = new ElementPtr();
    }

    ~SPSCQueue() {
        // this will empty out the whole queue
        delete read_ptr;
    }

    [[nodiscard]] std::size_t Size() const {
        return size.load();
    }

    [[nodiscard]] bool Empty() const {
        return Size() == 0;
    }

    [[nodiscard]] T& Front() const {
        return read_ptr->current;
    }

    template <typename Arg>
    void Push(Arg&& t) {
        // create the element, add it to the queue
        write_ptr->current = std::forward<Arg>(t);
        // set the next pointer to a new element ptr
        // then advance the write pointer
        ElementPtr* new_ptr = new ElementPtr();
        write_ptr->next.store(new_ptr, std::memory_order_release);
        write_ptr = new_ptr;
        ++size;

        // cv_mutex must be held or else there will be a missed wakeup if the other thread is in
        // the line before cv.wait
        // TODO(bunnei): This can be replaced with C++20 waitable atomics when properly supported.
        // See discussion on https://github.com/yuzu-emu/yuzu/pull/3173 for details.
        std::lock_guard lock{cv_mutex};
        cv.notify_one();
    }

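    // The release store to 'next' in Push() pairs with the acquire load in
    // Pop(T&): once the consumer observes a non-null next pointer, the element
    // written into 'current' before the store is guaranteed to be visible too.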
    void Pop() {
        --size;

        ElementPtr* tmpptr = read_ptr;
        // advance the read pointer
        read_ptr = tmpptr->next.load();
        // set the next element to nullptr to stop the recursive deletion
        tmpptr->next.store(nullptr);
        delete tmpptr; // this also deletes the element
    }

    bool Pop(T& t) {
        if (Empty())
            return false;

        --size;

        ElementPtr* tmpptr = read_ptr;
        read_ptr = tmpptr->next.load(std::memory_order_acquire);
        t = std::move(tmpptr->current);
        tmpptr->next.store(nullptr);
        delete tmpptr;
        return true;
    }

    void Wait() {
        if (Empty()) {
            std::unique_lock lock{cv_mutex};
            cv.wait(lock, [this]() { return !Empty(); });
        }
    }

    T PopWait() {
        Wait();
        T t;
        Pop(t);
        return t;
    }

    // not thread-safe
    void Clear() {
        size.store(0);
        delete read_ptr;
        write_ptr = read_ptr = new ElementPtr();
    }

private:
    // stores a pointer to element
    // and a pointer to the next ElementPtr
    class ElementPtr {
    public:
        ElementPtr() {}
        ~ElementPtr() {
            ElementPtr* next_ptr = next.load();

            if (next_ptr)
                delete next_ptr;
        }

        T current;
        std::atomic<ElementPtr*> next{nullptr};
    };

    ElementPtr* write_ptr;
    ElementPtr* read_ptr;
    std::atomic_size_t size{0};
    std::mutex cv_mutex;
    std::condition_variable cv;
};
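
// Illustrative usage sketch (names here are examples, not part of this header):
// one thread produces, exactly one other thread consumes.
//
//     Common::SPSCQueue<int> queue;
//     std::thread producer([&] {
//         for (int i = 0; i < 16; ++i)
//             queue.Push(i);
//     });
//     int first = queue.PopWait(); // blocks until an element is available
//     producer.join();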

// a simple thread-safe,
// single reader, multiple writer queue
template <typename T>
class MPSCQueue {
public:
    [[nodiscard]] std::size_t Size() const {
        return spsc_queue.Size();
    }

    [[nodiscard]] bool Empty() const {
        return spsc_queue.Empty();
    }

    [[nodiscard]] T& Front() const {
        return spsc_queue.Front();
    }

    template <typename Arg>
    void Push(Arg&& t) {
        // write_lock serializes concurrent producers, since the underlying
        // queue only supports a single writer
        std::lock_guard lock{write_lock};
        spsc_queue.Push(t);
    }

    void Pop() {
        return spsc_queue.Pop();
    }

    bool Pop(T& t) {
        return spsc_queue.Pop(t);
    }

    void Wait() {
        spsc_queue.Wait();
    }

    T PopWait() {
        return spsc_queue.PopWait();
    }

    // not thread-safe
    void Clear() {
        spsc_queue.Clear();
    }

private:
    SPSCQueue<T> spsc_queue;
    std::mutex write_lock;
};
} // namespace Common
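
For context, here is a minimal sketch of how the rewritten queues are typically driven: several producer threads share one MPSCQueue (its internal write_lock serializes their Push calls) while a single consumer drains it. The include path and all other names below are illustrative assumptions, not taken from this commit.

// illustrative driver, assuming the header above is saved as "threadsafe_queue.h"
#include <cstdio>
#include <thread>
#include <vector>
#include "threadsafe_queue.h"

int main() {
    Common::MPSCQueue<int> queue;
    std::vector<std::thread> producers;

    // four producers may Push concurrently; write_lock serializes them internally
    for (int p = 0; p < 4; ++p) {
        producers.emplace_back([&queue, p] {
            for (int i = 0; i < 8; ++i)
                queue.Push(p * 8 + i);
        });
    }

    // exactly one thread may consume; PopWait blocks until an element arrives
    for (int n = 0; n < 32; ++n)
        std::printf("%d\n", queue.PopWait());

    for (auto& thread : producers)
        thread.join();
}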