early-access version 2665

parent f9f5321781
commit 7a13d2c502
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============

-This is the source code for early-access 2663.
+This is the source code for early-access 2665.

 ## Legal Notice

@@ -149,7 +149,7 @@ public:
     }

     void Unmap(size_t virtual_offset, size_t length) {
-        std::lock_guard lock{placeholder_mutex};
+        std::scoped_lock lock{placeholder_mutex};

         // Unmap until there are no more placeholders
         while (UnmapOnePlaceholder(virtual_offset, length)) {
@@ -169,7 +169,7 @@ public:
         }
         const size_t virtual_end = virtual_offset + length;

-        std::lock_guard lock{placeholder_mutex};
+        std::scoped_lock lock{placeholder_mutex};
         auto [it, end] = placeholders.equal_range({virtual_offset, virtual_end});
         while (it != end) {
             const size_t offset = std::max(it->lower(), virtual_offset);
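Nearly every hunk in this commit replaces std::lock_guard with std::scoped_lock. For a single mutex the two are interchangeable; scoped_lock additionally accepts several mutexes and locks them with deadlock avoidance, which is why the codebase standardizes on it. A minimal illustration of the two forms (standard-library behaviour, not yuzu code):

    #include <mutex>

    std::mutex state_mutex;
    std::mutex log_mutex;
    int counter = 0;

    void increment() {
        // Identical semantics to std::lock_guard for one mutex.
        std::scoped_lock lock{state_mutex};
        ++counter;
    }

    void increment_and_log() {
        // Locks both mutexes together with deadlock avoidance,
        // which std::lock_guard cannot do.
        std::scoped_lock lock{state_mutex, log_mutex};
        ++counter;
    }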
@@ -17,7 +17,7 @@ namespace Common {
 class Event {
 public:
     void Set() {
-        std::lock_guard lk{mutex};
+        std::scoped_lock lk{mutex};
         if (!is_set) {
             is_set = true;
             condvar.notify_one();
@@ -52,7 +52,7 @@ public:
         // line before cv.wait
         // TODO(bunnei): This can be replaced with C++20 waitable atomics when properly supported.
        // See discussion on https://github.com/yuzu-emu/yuzu/pull/3173 for details.
-        std::lock_guard lock{cv_mutex};
+        std::scoped_lock lock{cv_mutex};
         cv.notify_one();
     }

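The TODO above points at C++20 waitable atomics (std::atomic::wait / notify_one), which could eventually replace the condition-variable-plus-mutex pair guarded here. A rough sketch of that shape for a simple one-shot flag, assuming full toolchain support (illustrative only, not code from this commit):

    #include <atomic>

    class OneShotFlag {
    public:
        void Set() {
            flag.store(true, std::memory_order_release);
            flag.notify_one(); // wakes a waiter blocked in Wait()
        }

        void Wait() {
            // Blocks only while the stored value is still false.
            flag.wait(false, std::memory_order_acquire);
        }

    private:
        std::atomic<bool> flag{false};
    };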
@@ -159,7 +159,7 @@ public:

     template <typename Arg>
     void Push(Arg&& t) {
-        std::lock_guard lock{write_lock};
+        std::scoped_lock lock{write_lock};
         spsc_queue.Push(t);
     }

@@ -148,29 +148,33 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const {

     // LayeredExeFS
     const auto load_dir = fs_controller.GetModificationLoadRoot(title_id);
+    const auto sdmc_load_dir = fs_controller.GetSDMCModificationLoadRoot(title_id);
+
+    std::vector<VirtualDir> patch_dirs = {sdmc_load_dir};
     if (load_dir != nullptr && load_dir->GetSize() > 0) {
-        auto patch_dirs = load_dir->GetSubdirectories();
-        std::sort(
-            patch_dirs.begin(), patch_dirs.end(),
-            [](const VirtualDir& l, const VirtualDir& r) { return l->GetName() < r->GetName(); });
+        const auto load_patch_dirs = load_dir->GetSubdirectories();
+        patch_dirs.insert(patch_dirs.end(), load_patch_dirs.begin(), load_patch_dirs.end());
+    }

-        std::vector<VirtualDir> layers;
-        layers.reserve(patch_dirs.size() + 1);
-        for (const auto& subdir : patch_dirs) {
-            if (std::find(disabled.begin(), disabled.end(), subdir->GetName()) != disabled.end())
-                continue;
-
-            auto exefs_dir = FindSubdirectoryCaseless(subdir, "exefs");
-            if (exefs_dir != nullptr)
-                layers.push_back(std::move(exefs_dir));
-        }
-        layers.push_back(exefs);
-
-        auto layered = LayeredVfsDirectory::MakeLayeredDirectory(std::move(layers));
-        if (layered != nullptr) {
-            LOG_INFO(Loader, " ExeFS: LayeredExeFS patches applied successfully");
-            exefs = std::move(layered);
-        }
-    }
+    std::sort(patch_dirs.begin(), patch_dirs.end(),
+              [](const VirtualDir& l, const VirtualDir& r) { return l->GetName() < r->GetName(); });
+
+    std::vector<VirtualDir> layers;
+    layers.reserve(patch_dirs.size() + 1);
+    for (const auto& subdir : patch_dirs) {
+        if (std::find(disabled.begin(), disabled.end(), subdir->GetName()) != disabled.end())
+            continue;
+
+        auto exefs_dir = FindSubdirectoryCaseless(subdir, "exefs");
+        if (exefs_dir != nullptr)
+            layers.push_back(std::move(exefs_dir));
+    }
+    layers.push_back(exefs);
+
+    auto layered = LayeredVfsDirectory::MakeLayeredDirectory(std::move(layers));
+    if (layered != nullptr) {
+        LOG_INFO(Loader, " ExeFS: LayeredExeFS patches applied successfully");
+        exefs = std::move(layered);
+    }

     if (Settings::values.dump_exefs) {
@@ -536,11 +540,20 @@ PatchManager::PatchVersionNames PatchManager::GetPatchVersionNames(VirtualFile u

     // SDMC mod directory (RomFS LayeredFS)
     const auto sdmc_mod_dir = fs_controller.GetSDMCModificationLoadRoot(title_id);
-    if (sdmc_mod_dir != nullptr && sdmc_mod_dir->GetSize() > 0 &&
-        IsDirValidAndNonEmpty(FindSubdirectoryCaseless(sdmc_mod_dir, "romfs"))) {
-        const auto mod_disabled =
-            std::find(disabled.begin(), disabled.end(), "SDMC") != disabled.end();
-        out.insert_or_assign(mod_disabled ? "[D] SDMC" : "SDMC", "LayeredFS");
+    if (sdmc_mod_dir != nullptr && sdmc_mod_dir->GetSize() > 0) {
+        std::string types;
+        if (IsDirValidAndNonEmpty(FindSubdirectoryCaseless(sdmc_mod_dir, "exefs"))) {
+            AppendCommaIfNotEmpty(types, "LayeredExeFS");
+        }
+        if (IsDirValidAndNonEmpty(FindSubdirectoryCaseless(sdmc_mod_dir, "romfs"))) {
+            AppendCommaIfNotEmpty(types, "LayeredFS");
+        }
+
+        if (!types.empty()) {
+            const auto mod_disabled =
+                std::find(disabled.begin(), disabled.end(), "SDMC") != disabled.end();
+            out.insert_or_assign(mod_disabled ? "[D] SDMC" : "SDMC", types);
+        }
     }

     // DLC
@@ -346,7 +346,8 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::
     return ResultSuccess;
 }

-ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+                                       bool invalidate_entire_icache) {
     // Validate the mapping request.
     R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
              ResultInvalidMemoryRegion);
@@ -396,7 +397,11 @@ ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std
     bool reprotected_pages = false;
     SCOPE_EXIT({
         if (reprotected_pages && any_code_pages) {
-            system.InvalidateCpuInstructionCacheRange(dst_address, size);
+            if (invalidate_entire_icache) {
+                system.InvalidateCpuInstructionCaches();
+            } else {
+                system.InvalidateCpuInstructionCacheRange(dst_address, size);
+            }
         }
     });

@@ -563,6 +568,8 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
     block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
                           KMemoryAttribute::None);

+    system.InvalidateCpuInstructionCaches();
+
     return ResultSuccess;
 }

@@ -38,7 +38,8 @@ public:
     ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
                               KMemoryPermission perm);
     ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
-    ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
+    ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+                               bool invalidate_entire_icache);
     ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
                                   VAddr src_addr);
     ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
@@ -99,7 +99,7 @@ struct KernelCore::Impl {
         // Close all open server sessions and ports.
         std::unordered_set<KAutoObject*> server_objects_;
         {
-            std::lock_guard lk(server_objects_lock);
+            std::scoped_lock lk(server_objects_lock);
             server_objects_ = server_objects;
             server_objects.clear();
         }
@@ -158,7 +158,7 @@ struct KernelCore::Impl {

         // Close kernel objects that were not freed on shutdown
         {
-            std::lock_guard lk(registered_in_use_objects_lock);
+            std::scoped_lock lk{registered_in_use_objects_lock};
             if (registered_in_use_objects.size()) {
                 for (auto& object : registered_in_use_objects) {
                     object->Close();
@@ -179,10 +179,10 @@ struct KernelCore::Impl {

         // Track kernel objects that were not freed on shutdown
         {
-            std::lock_guard lk(registered_objects_lock);
+            std::scoped_lock lk{registered_objects_lock};
             if (registered_objects.size()) {
-                LOG_CRITICAL(Kernel, "{} kernel objects were dangling on shutdown!",
-                             registered_objects.size());
+                LOG_DEBUG(Kernel, "{} kernel objects were dangling on shutdown!",
+                          registered_objects.size());
                 registered_objects.clear();
             }
         }
@@ -673,12 +673,12 @@ struct KernelCore::Impl {
     }

     void RegisterServerObject(KAutoObject* server_object) {
-        std::lock_guard lk(server_objects_lock);
+        std::scoped_lock lk(server_objects_lock);
         server_objects.insert(server_object);
     }

     void UnregisterServerObject(KAutoObject* server_object) {
-        std::lock_guard lk(server_objects_lock);
+        std::scoped_lock lk(server_objects_lock);
         server_objects.erase(server_object);
     }

@@ -954,22 +954,22 @@ void KernelCore::UnregisterServerObject(KAutoObject* server_object) {
 }

 void KernelCore::RegisterKernelObject(KAutoObject* object) {
-    std::lock_guard lk(impl->registered_objects_lock);
+    std::scoped_lock lk{impl->registered_objects_lock};
     impl->registered_objects.insert(object);
 }

 void KernelCore::UnregisterKernelObject(KAutoObject* object) {
-    std::lock_guard lk(impl->registered_objects_lock);
+    std::scoped_lock lk{impl->registered_objects_lock};
     impl->registered_objects.erase(object);
 }

 void KernelCore::RegisterInUseObject(KAutoObject* object) {
-    std::lock_guard lk(impl->registered_in_use_objects_lock);
+    std::scoped_lock lk{impl->registered_in_use_objects_lock};
     impl->registered_in_use_objects.insert(object);
 }

 void KernelCore::UnregisterInUseObject(KAutoObject* object) {
-    std::lock_guard lk(impl->registered_in_use_objects_lock);
+    std::scoped_lock lk{impl->registered_in_use_objects_lock};
     impl->registered_in_use_objects.erase(object);
 }

@@ -1713,7 +1713,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
         return ResultInvalidMemoryRegion;
     }

-    return page_table.UnmapCodeMemory(dst_address, src_address, size);
+    return page_table.UnmapCodeMemory(dst_address, src_address, size, true);
 }

 /// Exits the current process
@@ -24,7 +24,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
 }

 void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
-    std::lock_guard lock{mutex};
+    std::scoped_lock lock{mutex};
     if (nanoseconds > 0) {
         ASSERT(thread);
         ASSERT(thread->GetState() != ThreadState::Runnable);
@@ -35,7 +35,7 @@ void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
 }

 void TimeManager::UnscheduleTimeEvent(KThread* thread) {
-    std::lock_guard lock{mutex};
+    std::scoped_lock lock{mutex};
     system.CoreTiming().UnscheduleEvent(time_manager_event_type,
                                         reinterpret_cast<uintptr_t>(thread));
 }
@@ -318,7 +318,7 @@ void Controller_NPad::OnRelease() {
 }

 void Controller_NPad::RequestPadStateUpdate(Core::HID::NpadIdType npad_id) {
-    std::lock_guard lock{mutex};
+    std::scoped_lock lock{mutex};
     auto& controller = GetControllerFromNpadIdType(npad_id);
     const auto controller_type = controller.device->GetNpadStyleIndex();
     if (!controller.device->IsConnected()) {
@@ -389,8 +389,8 @@ public:

         if (bss_size) {
             auto block_guard = detail::ScopeExit([&] {
-                page_table.UnmapCodeMemory(addr + nro_size, bss_addr, bss_size);
-                page_table.UnmapCodeMemory(addr, nro_addr, nro_size);
+                page_table.UnmapCodeMemory(addr + nro_size, bss_addr, bss_size, false);
+                page_table.UnmapCodeMemory(addr, nro_addr, nro_size, false);
             });

             const ResultCode result{
@@ -572,15 +572,17 @@ public:
         if (info.bss_size != 0) {
             CASCADE_CODE(page_table.UnmapCodeMemory(info.nro_address + info.text_size +
                                                         info.ro_size + info.data_size,
-                                                    info.bss_address, info.bss_size));
+                                                    info.bss_address, info.bss_size, false));
         }

         CASCADE_CODE(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size,
                                                 info.src_addr + info.text_size + info.ro_size,
-                                                info.data_size));
+                                                info.data_size, false));
         CASCADE_CODE(page_table.UnmapCodeMemory(info.nro_address + info.text_size,
-                                                info.src_addr + info.text_size, info.ro_size));
-        CASCADE_CODE(page_table.UnmapCodeMemory(info.nro_address, info.src_addr, info.text_size));
+                                                info.src_addr + info.text_size, info.ro_size,
+                                                false));
+        CASCADE_CODE(
+            page_table.UnmapCodeMemory(info.nro_address, info.src_addr, info.text_size, false));
         return ResultSuccess;
     }

@ -21,7 +21,7 @@ Status BufferItemConsumer::AcquireBuffer(BufferItem* item, std::chrono::nanoseco
|
||||
return Status::BadValue;
|
||||
}
|
||||
|
||||
std::scoped_lock lock(mutex);
|
||||
std::scoped_lock lock{mutex};
|
||||
|
||||
if (const auto status = AcquireBufferLocked(item, present_when); status != Status::NoError) {
|
||||
if (status != Status::NoBufferAvailable) {
|
||||
@ -40,7 +40,7 @@ Status BufferItemConsumer::AcquireBuffer(BufferItem* item, std::chrono::nanoseco
|
||||
}
|
||||
|
||||
Status BufferItemConsumer::ReleaseBuffer(const BufferItem& item, Fence& release_fence) {
|
||||
std::scoped_lock lock(mutex);
|
||||
std::scoped_lock lock{mutex};
|
||||
|
||||
if (const auto status = AddReleaseFenceLocked(item.buf, item.graphic_buffer, release_fence);
|
||||
status != Status::NoError) {
|
||||
|
@ -19,7 +19,7 @@ BufferQueueConsumer::~BufferQueueConsumer() = default;
|
||||
|
||||
Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer,
|
||||
std::chrono::nanoseconds expected_present) {
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
|
||||
// Check that the consumer doesn't currently have the maximum number of buffers acquired.
|
||||
const s32 num_acquired_buffers{
|
||||
@ -120,7 +120,7 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
|
||||
|
||||
std::shared_ptr<IProducerListener> listener;
|
||||
{
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
|
||||
// If the frame number has changed because the buffer has been reallocated, we can ignore
|
||||
// this ReleaseBuffer for the old buffer.
|
||||
@ -180,7 +180,7 @@ Status BufferQueueConsumer::Connect(std::shared_ptr<IConsumerListener> consumer_
|
||||
|
||||
LOG_DEBUG(Service_NVFlinger, "controlled_by_app={}", controlled_by_app);
|
||||
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
|
||||
if (core->is_abandoned) {
|
||||
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
|
||||
@ -199,7 +199,7 @@ Status BufferQueueConsumer::GetReleasedBuffers(u64* out_slot_mask) {
|
||||
return Status::BadValue;
|
||||
}
|
||||
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
|
||||
if (core->is_abandoned) {
|
||||
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
|
||||
|
@ -15,7 +15,7 @@ BufferQueueCore::BufferQueueCore() = default;
|
||||
BufferQueueCore::~BufferQueueCore() = default;
|
||||
|
||||
void BufferQueueCore::NotifyShutdown() {
|
||||
std::scoped_lock lock(mutex);
|
||||
std::scoped_lock lock{mutex};
|
||||
|
||||
is_shutting_down = true;
|
||||
|
||||
|
@ -38,7 +38,7 @@ BufferQueueProducer::~BufferQueueProducer() {
|
||||
Status BufferQueueProducer::RequestBuffer(s32 slot, std::shared_ptr<GraphicBuffer>* buf) {
|
||||
LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
|
||||
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
|
||||
if (core->is_abandoned) {
|
||||
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
|
||||
@ -65,7 +65,7 @@ Status BufferQueueProducer::SetBufferCount(s32 buffer_count) {
|
||||
|
||||
std::shared_ptr<IConsumerListener> listener;
|
||||
{
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
core->WaitWhileAllocatingLocked();
|
||||
|
||||
if (core->is_abandoned) {
|
||||
@ -236,7 +236,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
|
||||
Status return_flags = Status::NoError;
|
||||
bool attached_by_consumer = false;
|
||||
{
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
core->WaitWhileAllocatingLocked();
|
||||
|
||||
if (format == PixelFormat::NoFormat) {
|
||||
@ -295,7 +295,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
|
||||
}
|
||||
|
||||
{
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
|
||||
if (core->is_abandoned) {
|
||||
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
|
||||
@ -320,7 +320,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
|
||||
Status BufferQueueProducer::DetachBuffer(s32 slot) {
|
||||
LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
|
||||
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
|
||||
if (core->is_abandoned) {
|
||||
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
|
||||
@ -356,7 +356,7 @@ Status BufferQueueProducer::DetachNextBuffer(std::shared_ptr<GraphicBuffer>* out
|
||||
return Status::BadValue;
|
||||
}
|
||||
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
core->WaitWhileAllocatingLocked();
|
||||
|
||||
if (core->is_abandoned) {
|
||||
@ -399,7 +399,7 @@ Status BufferQueueProducer::AttachBuffer(s32* out_slot,
|
||||
return Status::BadValue;
|
||||
}
|
||||
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
core->WaitWhileAllocatingLocked();
|
||||
|
||||
Status return_flags = Status::NoError;
|
||||
@ -460,7 +460,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
|
||||
BufferItem item;
|
||||
|
||||
{
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
|
||||
if (core->is_abandoned) {
|
||||
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
|
||||
@ -576,7 +576,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
|
||||
// Call back without the main BufferQueue lock held, but with the callback lock held so we can
|
||||
// ensure that callbacks occur in order
|
||||
{
|
||||
std::scoped_lock lock(callback_mutex);
|
||||
std::scoped_lock lock{callback_mutex};
|
||||
while (callback_ticket != current_callback_ticket) {
|
||||
callback_condition.wait(callback_mutex);
|
||||
}
|
||||
@ -597,7 +597,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
|
||||
void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
|
||||
LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
|
||||
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
|
||||
if (core->is_abandoned) {
|
||||
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
|
||||
@ -623,7 +623,7 @@ void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
|
||||
}
|
||||
|
||||
Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
|
||||
if (out_value == nullptr) {
|
||||
LOG_ERROR(Service_NVFlinger, "outValue was nullptr");
|
||||
@ -673,7 +673,7 @@ Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
|
||||
Status BufferQueueProducer::Connect(const std::shared_ptr<IProducerListener>& listener,
|
||||
NativeWindowApi api, bool producer_controlled_by_app,
|
||||
QueueBufferOutput* output) {
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
|
||||
LOG_DEBUG(Service_NVFlinger, "api = {} producer_controlled_by_app = {}", api,
|
||||
producer_controlled_by_app);
|
||||
@ -730,7 +730,7 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) {
|
||||
std::shared_ptr<IConsumerListener> listener;
|
||||
|
||||
{
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
|
||||
core->WaitWhileAllocatingLocked();
|
||||
|
||||
@ -780,7 +780,7 @@ Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot,
|
||||
return Status::BadValue;
|
||||
}
|
||||
|
||||
std::scoped_lock lock(core->mutex);
|
||||
std::scoped_lock lock{core->mutex};
|
||||
|
||||
slots[slot] = {};
|
||||
slots[slot].graphic_buffer = buffer;
|
||||
|
@ -18,7 +18,7 @@ ConsumerBase::ConsumerBase(std::unique_ptr<BufferQueueConsumer> consumer_)
|
||||
: consumer{std::move(consumer_)} {}
|
||||
|
||||
ConsumerBase::~ConsumerBase() {
|
||||
std::scoped_lock lock(mutex);
|
||||
std::scoped_lock lock{mutex};
|
||||
|
||||
ASSERT_MSG(is_abandoned, "consumer is not abandoned!");
|
||||
}
|
||||
@ -44,7 +44,7 @@ void ConsumerBase::OnFrameReplaced(const BufferItem& item) {
|
||||
}
|
||||
|
||||
void ConsumerBase::OnBuffersReleased() {
|
||||
std::scoped_lock lock(mutex);
|
||||
std::scoped_lock lock{mutex};
|
||||
|
||||
LOG_DEBUG(Service_NVFlinger, "called");
|
||||
|
||||
|
@ -14,7 +14,7 @@ HosBinderDriverServer::HosBinderDriverServer(Core::System& system_)
|
||||
HosBinderDriverServer::~HosBinderDriverServer() {}
|
||||
|
||||
u64 HosBinderDriverServer::RegisterProducer(std::unique_ptr<android::IBinder>&& binder) {
|
||||
std::lock_guard lk{lock};
|
||||
std::scoped_lock lk{lock};
|
||||
|
||||
last_id++;
|
||||
|
||||
@ -24,7 +24,7 @@ u64 HosBinderDriverServer::RegisterProducer(std::unique_ptr<android::IBinder>&&
|
||||
}
|
||||
|
||||
android::IBinder* HosBinderDriverServer::TryGetProducer(u64 id) {
|
||||
std::lock_guard lk{lock};
|
||||
std::scoped_lock lk{lock};
|
||||
|
||||
if (auto search = producers.find(id); search != producers.end()) {
|
||||
return search->second.get();
|
||||
|
@@ -689,6 +689,9 @@ Errno BSD::SetSockOptImpl(s32 fd, u32 level, OptName optname, size_t optlen, con
     case OptName::REUSEADDR:
         ASSERT(value == 0 || value == 1);
         return Translate(socket->SetReuseAddr(value != 0));
+    case OptName::KEEPALIVE:
+        ASSERT(value == 0 || value == 1);
+        return Translate(socket->SetKeepAlive(value != 0));
     case OptName::BROADCAST:
         ASSERT(value == 0 || value == 1);
         return Translate(socket->SetBroadcast(value != 0));
@@ -2,8 +2,24 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.

+#include <string_view>
+#include <utility>
+#include <vector>
+
+#include "common/string_util.h"
+#include "common/swap.h"
+#include "core/core.h"
 #include "core/hle/ipc_helpers.h"
 #include "core/hle/service/sockets/sfdnsres.h"
+#include "core/memory.h"
+
+#ifdef _WIN32
+#include <ws2tcpip.h>
+#elif YUZU_UNIX
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <sys/socket.h>
+#endif

 namespace Service::Sockets {

@@ -21,7 +37,7 @@ SFDNSRES::SFDNSRES(Core::System& system_) : ServiceFramework{system_, "sfdnsres"
         {9, nullptr, "CancelRequest"},
         {10, nullptr, "GetHostByNameRequestWithOptions"},
         {11, nullptr, "GetHostByAddrRequestWithOptions"},
-        {12, nullptr, "GetAddrInfoRequestWithOptions"},
+        {12, &SFDNSRES::GetAddrInfoRequestWithOptions, "GetAddrInfoRequestWithOptions"},
         {13, nullptr, "GetNameInfoRequestWithOptions"},
         {14, nullptr, "ResolverSetOptionRequest"},
         {15, nullptr, "ResolverGetOptionRequest"},
@@ -31,7 +47,142 @@ SFDNSRES::SFDNSRES(Core::System& system_) : ServiceFramework{system_, "sfdnsres"

 SFDNSRES::~SFDNSRES() = default;

-void SFDNSRES::GetAddrInfoRequest(Kernel::HLERequestContext& ctx) {
+enum class NetDbError : s32 {
+    Internal = -1,
+    Success = 0,
+    HostNotFound = 1,
+    TryAgain = 2,
+    NoRecovery = 3,
+    NoData = 4,
+};
+
+static NetDbError AddrInfoErrorToNetDbError(s32 result) {
+    // Best effort guess to map errors
+    switch (result) {
+    case 0:
+        return NetDbError::Success;
+    case EAI_AGAIN:
+        return NetDbError::TryAgain;
+    case EAI_NODATA:
+        return NetDbError::NoData;
+    default:
+        return NetDbError::HostNotFound;
+    }
+}
+
+static std::vector<u8> SerializeAddrInfo(const addrinfo* addrinfo, s32 result_code,
+                                         std::string_view host) {
+    // Adapted from
+    // https://github.com/switchbrew/libnx/blob/c5a9a909a91657a9818a3b7e18c9b91ff0cbb6e3/nx/source/runtime/resolver.c#L190
+    std::vector<u8> data;
+
+    auto* current = addrinfo;
+    while (current != nullptr) {
+        struct SerializedResponseHeader {
+            u32 magic;
+            s32 flags;
+            s32 family;
+            s32 socket_type;
+            s32 protocol;
+            u32 address_length;
+        };
+        static_assert(sizeof(SerializedResponseHeader) == 0x18,
+                      "Response header size must be 0x18 bytes");
+
+        constexpr auto header_size = sizeof(SerializedResponseHeader);
+        const auto addr_size =
+            current->ai_addr && current->ai_addrlen > 0 ? current->ai_addrlen : 4;
+        const auto canonname_size = current->ai_canonname ? strlen(current->ai_canonname) + 1 : 1;
+
+        const auto last_size = data.size();
+        data.resize(last_size + header_size + addr_size + canonname_size);
+
+        // Header in network byte order
+        SerializedResponseHeader header{};
+
+        constexpr auto HEADER_MAGIC = 0xBEEFCAFE;
+        header.magic = htonl(HEADER_MAGIC);
+        header.family = htonl(current->ai_family);
+        header.flags = htonl(current->ai_flags);
+        header.socket_type = htonl(current->ai_socktype);
+        header.protocol = htonl(current->ai_protocol);
+        header.address_length = current->ai_addr ? htonl((u32)current->ai_addrlen) : 0;
+
+        auto* header_ptr = data.data() + last_size;
+        std::memcpy(header_ptr, &header, header_size);
+
+        if (header.address_length == 0) {
+            std::memset(header_ptr + header_size, 0, 4);
+        } else {
+            switch (current->ai_family) {
+            case AF_INET: {
+                struct SockAddrIn {
+                    s16 sin_family;
+                    u16 sin_port;
+                    u32 sin_addr;
+                    u8 sin_zero[8];
+                };
+
+                SockAddrIn serialized_addr{};
+                const auto addr = *reinterpret_cast<sockaddr_in*>(current->ai_addr);
+                serialized_addr.sin_port = htons(addr.sin_port);
+                serialized_addr.sin_family = htons(addr.sin_family);
+                serialized_addr.sin_addr = htonl(addr.sin_addr.s_addr);
+                std::memcpy(header_ptr + header_size, &serialized_addr, sizeof(SockAddrIn));
+
+                char addr_string_buf[64]{};
+                inet_ntop(AF_INET, &addr.sin_addr, addr_string_buf, std::size(addr_string_buf));
+                LOG_INFO(Service, "Resolved host '{}' to IPv4 address {}", host, addr_string_buf);
+                break;
+            }
+            case AF_INET6: {
+                struct SockAddrIn6 {
+                    s16 sin6_family;
+                    u16 sin6_port;
+                    u32 sin6_flowinfo;
+                    u8 sin6_addr[16];
+                    u32 sin6_scope_id;
+                };
+
+                SockAddrIn6 serialized_addr{};
+                const auto addr = *reinterpret_cast<sockaddr_in6*>(current->ai_addr);
+                serialized_addr.sin6_family = htons(addr.sin6_family);
+                serialized_addr.sin6_port = htons(addr.sin6_port);
+                serialized_addr.sin6_flowinfo = htonl(addr.sin6_flowinfo);
+                serialized_addr.sin6_scope_id = htonl(addr.sin6_scope_id);
+                std::memcpy(serialized_addr.sin6_addr, &addr.sin6_addr,
+                            sizeof(SockAddrIn6::sin6_addr));
+                std::memcpy(header_ptr + header_size, &serialized_addr, sizeof(SockAddrIn6));
+
+                char addr_string_buf[64]{};
+                inet_ntop(AF_INET6, &addr.sin6_addr, addr_string_buf, std::size(addr_string_buf));
+                LOG_INFO(Service, "Resolved host '{}' to IPv6 address {}", host, addr_string_buf);
+                break;
+            }
+            default:
+                std::memcpy(header_ptr + header_size, current->ai_addr, addr_size);
+                break;
+            }
+        }
+        if (current->ai_canonname) {
+            std::memcpy(header_ptr + addr_size, current->ai_canonname, canonname_size);
+        } else {
+            *(header_ptr + header_size + addr_size) = 0;
+        }
+
+        current = current->ai_next;
+    }
+
+    // 4-byte sentinel value
+    data.push_back(0);
+    data.push_back(0);
+    data.push_back(0);
+    data.push_back(0);
+
+    return data;
+}
+
+static std::pair<u32, s32> GetAddrInfoRequestImpl(Kernel::HLERequestContext& ctx) {
     struct Parameters {
         u8 use_nsd_resolve;
         u32 unknown;
@@ -42,11 +193,51 @@ void SFDNSRES::GetAddrInfoRequest(Kernel::HLERequestContext& ctx) {
     const auto parameters = rp.PopRaw<Parameters>();

     LOG_WARNING(Service,
-                "(STUBBED) called. use_nsd_resolve={}, unknown=0x{:08X}, process_id=0x{:016X}",
+                "called with ignored parameters: use_nsd_resolve={}, unknown={}, process_id={}",
                 parameters.use_nsd_resolve, parameters.unknown, parameters.process_id);

-    IPC::ResponseBuilder rb{ctx, 2};
-    rb.Push(ResultSuccess);
+    const auto host_buffer = ctx.ReadBuffer(0);
+    const std::string host = Common::StringFromBuffer(host_buffer);
+
+    const auto service_buffer = ctx.ReadBuffer(1);
+    const std::string service = Common::StringFromBuffer(service_buffer);
+
+    addrinfo* addrinfo;
+    // Pass null for hints. Serialized hints are also passed in a buffer, but are ignored for now
+    s32 result_code = getaddrinfo(host.c_str(), service.c_str(), nullptr, &addrinfo);
+
+    u32 data_size = 0;
+    if (result_code == 0 && addrinfo != nullptr) {
+        const std::vector<u8>& data = SerializeAddrInfo(addrinfo, result_code, host);
+        data_size = static_cast<u32>(data.size());
+        freeaddrinfo(addrinfo);
+
+        ctx.WriteBuffer(data, 0);
+    }
+
+    return std::make_pair(data_size, result_code);
 }

-} // namespace Service::Sockets
+void SFDNSRES::GetAddrInfoRequest(Kernel::HLERequestContext& ctx) {
+    auto [data_size, result_code] = GetAddrInfoRequestImpl(ctx);
+
+    IPC::ResponseBuilder rb{ctx, 4};
+    rb.Push(ResultSuccess);
+    rb.Push(static_cast<s32>(AddrInfoErrorToNetDbError(result_code))); // NetDBErrorCode
+    rb.Push(result_code);                                              // errno
+    rb.Push(data_size);                                                // serialized size
+}
+
+void SFDNSRES::GetAddrInfoRequestWithOptions(Kernel::HLERequestContext& ctx) {
+    // Additional options are ignored
+    auto [data_size, result_code] = GetAddrInfoRequestImpl(ctx);
+
+    IPC::ResponseBuilder rb{ctx, 5};
+    rb.Push(ResultSuccess);
+    rb.Push(data_size);                                                // serialized size
+    rb.Push(result_code);                                              // errno
+    rb.Push(static_cast<s32>(AddrInfoErrorToNetDbError(result_code))); // NetDBErrorCode
+    rb.Push(0);
+}
+
+} // namespace Service::Sockets
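The resolver implementation above is a thin wrapper over the standard getaddrinfo/freeaddrinfo API, with the results re-serialized into the libnx wire format. For reference, the underlying call pattern in isolation looks roughly like this (hypothetical standalone example using POSIX headers; Windows builds would pull in ws2tcpip.h as in the diff above):

    #include <arpa/inet.h>
    #include <cstdio>
    #include <netdb.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    int main() {
        addrinfo hints{};
        hints.ai_family = AF_UNSPEC;     // accept IPv4 or IPv6
        hints.ai_socktype = SOCK_STREAM; // TCP

        addrinfo* results = nullptr;
        const int rc = getaddrinfo("example.com", "443", &hints, &results);
        if (rc != 0) {
            std::printf("getaddrinfo failed: %s\n", gai_strerror(rc));
            return 1;
        }

        for (const addrinfo* cur = results; cur != nullptr; cur = cur->ai_next) {
            char buf[INET6_ADDRSTRLEN]{};
            if (cur->ai_family == AF_INET) {
                const auto* sa = reinterpret_cast<const sockaddr_in*>(cur->ai_addr);
                inet_ntop(AF_INET, &sa->sin_addr, buf, sizeof(buf));
            } else if (cur->ai_family == AF_INET6) {
                const auto* sa6 = reinterpret_cast<const sockaddr_in6*>(cur->ai_addr);
                inet_ntop(AF_INET6, &sa6->sin6_addr, buf, sizeof(buf));
            }
            std::printf("resolved to %s\n", buf);
        }
        freeaddrinfo(results); // every successful getaddrinfo needs a matching free
        return 0;
    }

Note that the service code passes null hints, so the host resolver's defaults decide which address families come back.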
@@ -19,6 +19,7 @@ public:

 private:
     void GetAddrInfoRequest(Kernel::HLERequestContext& ctx);
+    void GetAddrInfoRequestWithOptions(Kernel::HLERequestContext& ctx);
 };

 } // namespace Service::Sockets
@@ -46,6 +46,7 @@ enum class Protocol : u32 {

 enum class OptName : u32 {
     REUSEADDR = 0x4,
+    KEEPALIVE = 0x8,
     BROADCAST = 0x20,
     LINGER = 0x80,
     SNDBUF = 0x1001,
@@ -600,6 +600,10 @@ Errno Socket::SetReuseAddr(bool enable) {
     return SetSockOpt<u32>(fd, SO_REUSEADDR, enable ? 1 : 0);
 }

+Errno Socket::SetKeepAlive(bool enable) {
+    return SetSockOpt<u32>(fd, SO_KEEPALIVE, enable ? 1 : 0);
+}
+
 Errno Socket::SetBroadcast(bool enable) {
     return SetSockOpt<u32>(fd, SO_BROADCAST, enable ? 1 : 0);
 }
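Socket::SetKeepAlive forwards the guest's KEEPALIVE option to the host's SO_KEEPALIVE socket option through the existing SetSockOpt helper. Outside the emulator wrapper, the equivalent raw BSD-sockets call is simply (illustrative sketch):

    #include <sys/socket.h>

    // Returns true on success; fd is an already-created socket descriptor.
    bool EnableKeepAlive(int fd) {
        const int enable = 1;
        return setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &enable, sizeof(enable)) == 0;
    }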
@@ -67,6 +67,8 @@ public:

     Errno SetReuseAddr(bool enable);

+    Errno SetKeepAlive(bool enable);
+
     Errno SetBroadcast(bool enable);

     Errno SetSndBuf(u32 value);
@ -53,13 +53,13 @@ PerfStats::~PerfStats() {
|
||||
}
|
||||
|
||||
void PerfStats::BeginSystemFrame() {
|
||||
std::lock_guard lock{object_mutex};
|
||||
std::scoped_lock lock{object_mutex};
|
||||
|
||||
frame_begin = Clock::now();
|
||||
}
|
||||
|
||||
void PerfStats::EndSystemFrame() {
|
||||
std::lock_guard lock{object_mutex};
|
||||
std::scoped_lock lock{object_mutex};
|
||||
|
||||
auto frame_end = Clock::now();
|
||||
const auto frame_time = frame_end - frame_begin;
|
||||
@ -79,7 +79,7 @@ void PerfStats::EndGameFrame() {
|
||||
}
|
||||
|
||||
double PerfStats::GetMeanFrametime() const {
|
||||
std::lock_guard lock{object_mutex};
|
||||
std::scoped_lock lock{object_mutex};
|
||||
|
||||
if (current_index <= IgnoreFrames) {
|
||||
return 0;
|
||||
@ -91,7 +91,7 @@ double PerfStats::GetMeanFrametime() const {
|
||||
}
|
||||
|
||||
PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us) {
|
||||
std::lock_guard lock{object_mutex};
|
||||
std::scoped_lock lock{object_mutex};
|
||||
|
||||
const auto now = Clock::now();
|
||||
// Walltime elapsed since stats were reset
|
||||
@ -120,7 +120,7 @@ PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us
|
||||
}
|
||||
|
||||
double PerfStats::GetLastFrameTimeScale() const {
|
||||
std::lock_guard lock{object_mutex};
|
||||
std::scoped_lock lock{object_mutex};
|
||||
|
||||
constexpr double FRAME_LENGTH = 1.0 / 60;
|
||||
return duration_cast<DoubleSecs>(previous_frame_length).count() / FRAME_LENGTH;
|
||||
|
@ -80,7 +80,7 @@ bool Freezer::IsActive() const {
|
||||
}
|
||||
|
||||
void Freezer::Clear() {
|
||||
std::lock_guard lock{entries_mutex};
|
||||
std::scoped_lock lock{entries_mutex};
|
||||
|
||||
LOG_DEBUG(Common_Memory, "Clearing all frozen memory values.");
|
||||
|
||||
@ -88,7 +88,7 @@ void Freezer::Clear() {
|
||||
}
|
||||
|
||||
u64 Freezer::Freeze(VAddr address, u32 width) {
|
||||
std::lock_guard lock{entries_mutex};
|
||||
std::scoped_lock lock{entries_mutex};
|
||||
|
||||
const auto current_value = MemoryReadWidth(memory, width, address);
|
||||
entries.push_back({address, width, current_value});
|
||||
@ -101,7 +101,7 @@ u64 Freezer::Freeze(VAddr address, u32 width) {
|
||||
}
|
||||
|
||||
void Freezer::Unfreeze(VAddr address) {
|
||||
std::lock_guard lock{entries_mutex};
|
||||
std::scoped_lock lock{entries_mutex};
|
||||
|
||||
LOG_DEBUG(Common_Memory, "Unfreezing memory for address={:016X}", address);
|
||||
|
||||
@ -109,13 +109,13 @@ void Freezer::Unfreeze(VAddr address) {
|
||||
}
|
||||
|
||||
bool Freezer::IsFrozen(VAddr address) const {
|
||||
std::lock_guard lock{entries_mutex};
|
||||
std::scoped_lock lock{entries_mutex};
|
||||
|
||||
return FindEntry(address) != entries.cend();
|
||||
}
|
||||
|
||||
void Freezer::SetFrozenValue(VAddr address, u64 value) {
|
||||
std::lock_guard lock{entries_mutex};
|
||||
std::scoped_lock lock{entries_mutex};
|
||||
|
||||
const auto iter = FindEntry(address);
|
||||
|
||||
@ -132,7 +132,7 @@ void Freezer::SetFrozenValue(VAddr address, u64 value) {
|
||||
}
|
||||
|
||||
std::optional<Freezer::Entry> Freezer::GetEntry(VAddr address) const {
|
||||
std::lock_guard lock{entries_mutex};
|
||||
std::scoped_lock lock{entries_mutex};
|
||||
|
||||
const auto iter = FindEntry(address);
|
||||
|
||||
@ -144,7 +144,7 @@ std::optional<Freezer::Entry> Freezer::GetEntry(VAddr address) const {
|
||||
}
|
||||
|
||||
std::vector<Freezer::Entry> Freezer::GetEntries() const {
|
||||
std::lock_guard lock{entries_mutex};
|
||||
std::scoped_lock lock{entries_mutex};
|
||||
|
||||
return entries;
|
||||
}
|
||||
@ -165,7 +165,7 @@ void Freezer::FrameCallback(std::uintptr_t, std::chrono::nanoseconds ns_late) {
|
||||
return;
|
||||
}
|
||||
|
||||
std::lock_guard lock{entries_mutex};
|
||||
std::scoped_lock lock{entries_mutex};
|
||||
|
||||
for (const auto& entry : entries) {
|
||||
LOG_DEBUG(Common_Memory,
|
||||
@ -178,7 +178,7 @@ void Freezer::FrameCallback(std::uintptr_t, std::chrono::nanoseconds ns_late) {
|
||||
}
|
||||
|
||||
void Freezer::FillEntryReads() {
|
||||
std::lock_guard lock{entries_mutex};
|
||||
std::scoped_lock lock{entries_mutex};
|
||||
|
||||
LOG_DEBUG(Common_Memory, "Updating memory freeze entries to current values.");
|
||||
|
||||
|
@ -62,7 +62,7 @@ public:
|
||||
|
||||
bool UpdateMotion(SDL_ControllerSensorEvent event) {
|
||||
constexpr float gravity_constant = 9.80665f;
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
const u64 time_difference = event.timestamp - last_motion_update;
|
||||
last_motion_update = event.timestamp;
|
||||
switch (event.sensor) {
|
||||
@ -241,7 +241,7 @@ private:
|
||||
};
|
||||
|
||||
std::shared_ptr<SDLJoystick> SDLDriver::GetSDLJoystickByGUID(const std::string& guid, int port) {
|
||||
std::lock_guard lock{joystick_map_mutex};
|
||||
std::scoped_lock lock{joystick_map_mutex};
|
||||
const auto it = joystick_map.find(guid);
|
||||
|
||||
if (it != joystick_map.end()) {
|
||||
@ -263,7 +263,7 @@ std::shared_ptr<SDLJoystick> SDLDriver::GetSDLJoystickBySDLID(SDL_JoystickID sdl
|
||||
auto sdl_joystick = SDL_JoystickFromInstanceID(sdl_id);
|
||||
const std::string guid = GetGUID(sdl_joystick);
|
||||
|
||||
std::lock_guard lock{joystick_map_mutex};
|
||||
std::scoped_lock lock{joystick_map_mutex};
|
||||
const auto map_it = joystick_map.find(guid);
|
||||
|
||||
if (map_it == joystick_map.end()) {
|
||||
@ -297,7 +297,7 @@ void SDLDriver::InitJoystick(int joystick_index) {
|
||||
|
||||
const std::string guid = GetGUID(sdl_joystick);
|
||||
|
||||
std::lock_guard lock{joystick_map_mutex};
|
||||
std::scoped_lock lock{joystick_map_mutex};
|
||||
if (joystick_map.find(guid) == joystick_map.end()) {
|
||||
auto joystick = std::make_shared<SDLJoystick>(guid, 0, sdl_joystick, sdl_gamecontroller);
|
||||
PreSetController(joystick->GetPadIdentifier());
|
||||
@ -326,7 +326,7 @@ void SDLDriver::InitJoystick(int joystick_index) {
|
||||
void SDLDriver::CloseJoystick(SDL_Joystick* sdl_joystick) {
|
||||
const std::string guid = GetGUID(sdl_joystick);
|
||||
|
||||
std::lock_guard lock{joystick_map_mutex};
|
||||
std::scoped_lock lock{joystick_map_mutex};
|
||||
// This call to guid is safe since the joystick is guaranteed to be in the map
|
||||
const auto& joystick_guid_list = joystick_map[guid];
|
||||
const auto joystick_it = std::find_if(joystick_guid_list.begin(), joystick_guid_list.end(),
|
||||
@ -392,7 +392,7 @@ void SDLDriver::HandleGameControllerEvent(const SDL_Event& event) {
|
||||
}
|
||||
|
||||
void SDLDriver::CloseJoysticks() {
|
||||
std::lock_guard lock{joystick_map_mutex};
|
||||
std::scoped_lock lock{joystick_map_mutex};
|
||||
joystick_map.clear();
|
||||
}
|
||||
|
||||
|
@ -8,37 +8,37 @@
|
||||
namespace InputCommon {
|
||||
|
||||
void InputEngine::PreSetController(const PadIdentifier& identifier) {
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
controller_list.try_emplace(identifier);
|
||||
}
|
||||
|
||||
void InputEngine::PreSetButton(const PadIdentifier& identifier, int button) {
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
ControllerData& controller = controller_list.at(identifier);
|
||||
controller.buttons.try_emplace(button, false);
|
||||
}
|
||||
|
||||
void InputEngine::PreSetHatButton(const PadIdentifier& identifier, int button) {
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
ControllerData& controller = controller_list.at(identifier);
|
||||
controller.hat_buttons.try_emplace(button, u8{0});
|
||||
}
|
||||
|
||||
void InputEngine::PreSetAxis(const PadIdentifier& identifier, int axis) {
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
ControllerData& controller = controller_list.at(identifier);
|
||||
controller.axes.try_emplace(axis, 0.0f);
|
||||
}
|
||||
|
||||
void InputEngine::PreSetMotion(const PadIdentifier& identifier, int motion) {
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
ControllerData& controller = controller_list.at(identifier);
|
||||
controller.motions.try_emplace(motion);
|
||||
}
|
||||
|
||||
void InputEngine::SetButton(const PadIdentifier& identifier, int button, bool value) {
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
ControllerData& controller = controller_list.at(identifier);
|
||||
if (!configuring) {
|
||||
controller.buttons.insert_or_assign(button, value);
|
||||
@ -49,7 +49,7 @@ void InputEngine::SetButton(const PadIdentifier& identifier, int button, bool va
|
||||
|
||||
void InputEngine::SetHatButton(const PadIdentifier& identifier, int button, u8 value) {
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
ControllerData& controller = controller_list.at(identifier);
|
||||
if (!configuring) {
|
||||
controller.hat_buttons.insert_or_assign(button, value);
|
||||
@ -60,7 +60,7 @@ void InputEngine::SetHatButton(const PadIdentifier& identifier, int button, u8 v
|
||||
|
||||
void InputEngine::SetAxis(const PadIdentifier& identifier, int axis, f32 value) {
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
ControllerData& controller = controller_list.at(identifier);
|
||||
if (!configuring) {
|
||||
controller.axes.insert_or_assign(axis, value);
|
||||
@ -71,7 +71,7 @@ void InputEngine::SetAxis(const PadIdentifier& identifier, int axis, f32 value)
|
||||
|
||||
void InputEngine::SetBattery(const PadIdentifier& identifier, Common::Input::BatteryLevel value) {
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
ControllerData& controller = controller_list.at(identifier);
|
||||
if (!configuring) {
|
||||
controller.battery = value;
|
||||
@ -82,7 +82,7 @@ void InputEngine::SetBattery(const PadIdentifier& identifier, Common::Input::Bat
|
||||
|
||||
void InputEngine::SetMotion(const PadIdentifier& identifier, int motion, const BasicMotion& value) {
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
ControllerData& controller = controller_list.at(identifier);
|
||||
if (!configuring) {
|
||||
controller.motions.insert_or_assign(motion, value);
|
||||
@ -92,7 +92,7 @@ void InputEngine::SetMotion(const PadIdentifier& identifier, int motion, const B
|
||||
}
|
||||
|
||||
bool InputEngine::GetButton(const PadIdentifier& identifier, int button) const {
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
const auto controller_iter = controller_list.find(identifier);
|
||||
if (controller_iter == controller_list.cend()) {
|
||||
LOG_ERROR(Input, "Invalid identifier guid={}, pad={}, port={}", identifier.guid.RawString(),
|
||||
@ -109,7 +109,7 @@ bool InputEngine::GetButton(const PadIdentifier& identifier, int button) const {
|
||||
}
|
||||
|
||||
bool InputEngine::GetHatButton(const PadIdentifier& identifier, int button, u8 direction) const {
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
const auto controller_iter = controller_list.find(identifier);
|
||||
if (controller_iter == controller_list.cend()) {
|
||||
LOG_ERROR(Input, "Invalid identifier guid={}, pad={}, port={}", identifier.guid.RawString(),
|
||||
@ -126,7 +126,7 @@ bool InputEngine::GetHatButton(const PadIdentifier& identifier, int button, u8 d
|
||||
}
|
||||
|
||||
f32 InputEngine::GetAxis(const PadIdentifier& identifier, int axis) const {
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
const auto controller_iter = controller_list.find(identifier);
|
||||
if (controller_iter == controller_list.cend()) {
|
||||
LOG_ERROR(Input, "Invalid identifier guid={}, pad={}, port={}", identifier.guid.RawString(),
|
||||
@ -143,7 +143,7 @@ f32 InputEngine::GetAxis(const PadIdentifier& identifier, int axis) const {
|
||||
}
|
||||
|
||||
Common::Input::BatteryLevel InputEngine::GetBattery(const PadIdentifier& identifier) const {
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
const auto controller_iter = controller_list.find(identifier);
|
||||
if (controller_iter == controller_list.cend()) {
|
||||
LOG_ERROR(Input, "Invalid identifier guid={}, pad={}, port={}", identifier.guid.RawString(),
|
||||
@ -155,7 +155,7 @@ Common::Input::BatteryLevel InputEngine::GetBattery(const PadIdentifier& identif
|
||||
}
|
||||
|
||||
BasicMotion InputEngine::GetMotion(const PadIdentifier& identifier, int motion) const {
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
const auto controller_iter = controller_list.find(identifier);
|
||||
if (controller_iter == controller_list.cend()) {
|
||||
LOG_ERROR(Input, "Invalid identifier guid={}, pad={}, port={}", identifier.guid.RawString(),
|
||||
@ -186,7 +186,7 @@ void InputEngine::ResetAnalogState() {
|
||||
}
|
||||
|
||||
void InputEngine::TriggerOnButtonChange(const PadIdentifier& identifier, int button, bool value) {
|
||||
std::lock_guard lock{mutex_callback};
|
||||
std::scoped_lock lock{mutex_callback};
|
||||
for (const auto& poller_pair : callback_list) {
|
||||
const InputIdentifier& poller = poller_pair.second;
|
||||
if (!IsInputIdentifierEqual(poller, identifier, EngineInputType::Button, button)) {
|
||||
@ -214,7 +214,7 @@ void InputEngine::TriggerOnButtonChange(const PadIdentifier& identifier, int but
|
||||
}
|
||||
|
||||
void InputEngine::TriggerOnHatButtonChange(const PadIdentifier& identifier, int button, u8 value) {
|
||||
std::lock_guard lock{mutex_callback};
|
||||
std::scoped_lock lock{mutex_callback};
|
||||
for (const auto& poller_pair : callback_list) {
|
||||
const InputIdentifier& poller = poller_pair.second;
|
||||
if (!IsInputIdentifierEqual(poller, identifier, EngineInputType::HatButton, button)) {
|
||||
@ -243,7 +243,7 @@ void InputEngine::TriggerOnHatButtonChange(const PadIdentifier& identifier, int
|
||||
}
|
||||
|
||||
void InputEngine::TriggerOnAxisChange(const PadIdentifier& identifier, int axis, f32 value) {
|
||||
std::lock_guard lock{mutex_callback};
|
||||
std::scoped_lock lock{mutex_callback};
|
||||
for (const auto& poller_pair : callback_list) {
|
||||
const InputIdentifier& poller = poller_pair.second;
|
||||
if (!IsInputIdentifierEqual(poller, identifier, EngineInputType::Analog, axis)) {
|
||||
@ -270,7 +270,7 @@ void InputEngine::TriggerOnAxisChange(const PadIdentifier& identifier, int axis,
|
||||
|
||||
void InputEngine::TriggerOnBatteryChange(const PadIdentifier& identifier,
|
||||
[[maybe_unused]] Common::Input::BatteryLevel value) {
|
||||
std::lock_guard lock{mutex_callback};
|
||||
std::scoped_lock lock{mutex_callback};
|
||||
for (const auto& poller_pair : callback_list) {
|
||||
const InputIdentifier& poller = poller_pair.second;
|
||||
if (!IsInputIdentifierEqual(poller, identifier, EngineInputType::Battery, 0)) {
|
||||
@ -284,7 +284,7 @@ void InputEngine::TriggerOnBatteryChange(const PadIdentifier& identifier,
|
||||
|
||||
void InputEngine::TriggerOnMotionChange(const PadIdentifier& identifier, int motion,
|
||||
const BasicMotion& value) {
|
||||
std::lock_guard lock{mutex_callback};
|
||||
std::scoped_lock lock{mutex_callback};
|
||||
for (const auto& poller_pair : callback_list) {
|
||||
const InputIdentifier& poller = poller_pair.second;
|
||||
if (!IsInputIdentifierEqual(poller, identifier, EngineInputType::Motion, motion)) {
|
||||
@ -346,18 +346,18 @@ const std::string& InputEngine::GetEngineName() const {
|
||||
}
|
||||
|
||||
int InputEngine::SetCallback(InputIdentifier input_identifier) {
|
||||
std::lock_guard lock{mutex_callback};
|
||||
std::scoped_lock lock{mutex_callback};
|
||||
callback_list.insert_or_assign(last_callback_key, std::move(input_identifier));
|
||||
return last_callback_key++;
|
||||
}
|
||||
|
||||
void InputEngine::SetMappingCallback(MappingCallback callback) {
|
||||
std::lock_guard lock{mutex_callback};
|
||||
std::scoped_lock lock{mutex_callback};
|
||||
mapping_callback = std::move(callback);
|
||||
}
|
||||
|
||||
void InputEngine::DeleteCallback(int key) {
|
||||
std::lock_guard lock{mutex_callback};
|
||||
std::scoped_lock lock{mutex_callback};
|
||||
const auto& iterator = callback_list.find(key);
|
||||
if (iterator == callback_list.end()) {
|
||||
LOG_ERROR(Input, "Tried to delete non-existent callback {}", key);
|
||||
|
@ -230,7 +230,7 @@ struct GPU::Impl {
|
||||
void IncrementSyncPoint(u32 syncpoint_id) {
|
||||
auto& syncpoint = syncpoints.at(syncpoint_id);
|
||||
syncpoint++;
|
||||
std::lock_guard lock{sync_mutex};
|
||||
std::scoped_lock lock{sync_mutex};
|
||||
sync_cv.notify_all();
|
||||
auto& interrupt = syncpt_interrupts.at(syncpoint_id);
|
||||
if (!interrupt.empty()) {
|
||||
@ -252,7 +252,7 @@ struct GPU::Impl {
|
||||
}
|
||||
|
||||
void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
|
||||
std::lock_guard lock{sync_mutex};
|
||||
std::scoped_lock lock{sync_mutex};
|
||||
auto& interrupt = syncpt_interrupts.at(syncpoint_id);
|
||||
bool contains = std::any_of(interrupt.begin(), interrupt.end(),
|
||||
[value](u32 in_value) { return in_value == value; });
|
||||
@ -263,7 +263,7 @@ struct GPU::Impl {
|
||||
}
|
||||
|
||||
[[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value) {
|
||||
std::lock_guard lock{sync_mutex};
|
||||
std::scoped_lock lock{sync_mutex};
|
||||
auto& interrupt = syncpt_interrupts.at(syncpoint_id);
|
||||
const auto iter =
|
||||
std::find_if(interrupt.begin(), interrupt.end(),
|
||||
|
@ -56,7 +56,7 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
|
||||
if (next.block) {
|
||||
// We have to lock the write_lock to ensure that the condition_variable wait not get a
|
||||
// race between the check and the lock itself.
|
||||
std::lock_guard lk(state.write_lock);
|
||||
std::scoped_lock lk{state.write_lock};
|
||||
state.cv.notify_all();
|
||||
}
|
||||
}
|
||||
|
@ -253,7 +253,7 @@ GraphicsPipeline::GraphicsPipeline(
|
||||
}
|
||||
}
|
||||
if (in_parallel) {
|
||||
std::lock_guard lock{built_mutex};
|
||||
std::scoped_lock lock{built_mutex};
|
||||
built_fence.Create();
|
||||
// Flush this context to ensure compilation commands and fence are in the GPU pipe.
|
||||
glFlush();
|
||||
|
@ -258,7 +258,7 @@ void ShaderCache::LoadDiskResources(u64 title_id, std::stop_token stop_loading,
|
||||
[this, key, env = std::move(env), &state, &callback](Context* ctx) mutable {
|
||||
ctx->pools.ReleaseContents();
|
||||
auto pipeline{CreateComputePipeline(ctx->pools, key, env)};
|
||||
std::lock_guard lock{state.mutex};
|
||||
std::scoped_lock lock{state.mutex};
|
||||
if (pipeline) {
|
||||
compute_cache.emplace(key, std::move(pipeline));
|
||||
}
|
||||
@ -280,7 +280,7 @@ void ShaderCache::LoadDiskResources(u64 title_id, std::stop_token stop_loading,
|
||||
}
|
||||
ctx->pools.ReleaseContents();
|
||||
auto pipeline{CreateGraphicsPipeline(ctx->pools, key, MakeSpan(env_ptrs), false)};
|
||||
std::lock_guard lock{state.mutex};
|
||||
std::scoped_lock lock{state.mutex};
|
||||
if (pipeline) {
|
||||
graphics_cache.emplace(key, std::move(pipeline));
|
||||
}
|
||||
|
@ -57,7 +57,7 @@ void PipelineStatistics::Collect(VkPipeline pipeline) {
|
||||
stage_stats.basic_block_count = GetUint64(statistic);
|
||||
}
|
||||
}
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
collected_stats.push_back(stage_stats);
|
||||
}
|
||||
}
|
||||
@ -66,7 +66,7 @@ void PipelineStatistics::Report() const {
|
||||
double num{};
|
||||
Stats total;
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
for (const Stats& stats : collected_stats) {
|
||||
total.code_size += stats.code_size;
|
||||
total.register_count += stats.register_count;
|
||||
|
@ -77,7 +77,7 @@ ComputePipeline::ComputePipeline(const Device& device_, DescriptorPool& descript
|
||||
if (pipeline_statistics) {
|
||||
pipeline_statistics->Collect(*pipeline);
|
||||
}
|
||||
std::lock_guard lock{build_mutex};
|
||||
std::scoped_lock lock{build_mutex};
|
||||
is_built = true;
|
||||
build_condvar.notify_one();
|
||||
if (shader_notify) {
|
||||
|
@ -258,7 +258,7 @@ GraphicsPipeline::GraphicsPipeline(
|
||||
pipeline_statistics->Collect(*pipeline);
|
||||
}
|
||||
|
||||
std::lock_guard lock{build_mutex};
|
||||
std::scoped_lock lock{build_mutex};
|
||||
is_built = true;
|
||||
build_condvar.notify_one();
|
||||
if (shader_notify) {
|
||||
|
@ -404,7 +404,7 @@ void PipelineCache::LoadDiskResources(u64 title_id, std::stop_token stop_loading
|
||||
workers.QueueWork([this, key, env = std::move(env), &state, &callback]() mutable {
|
||||
ShaderPools pools;
|
||||
auto pipeline{CreateComputePipeline(pools, key, env, state.statistics.get(), false)};
|
||||
std::lock_guard lock{state.mutex};
|
||||
std::scoped_lock lock{state.mutex};
|
||||
if (pipeline) {
|
||||
compute_cache.emplace(key, std::move(pipeline));
|
||||
}
|
||||
@ -434,7 +434,7 @@ void PipelineCache::LoadDiskResources(u64 title_id, std::stop_token stop_loading
|
||||
auto pipeline{CreateGraphicsPipeline(pools, key, MakeSpan(env_ptrs),
|
||||
state.statistics.get(), false)};
|
||||
|
||||
std::lock_guard lock{state.mutex};
|
||||
std::scoped_lock lock{state.mutex};
|
||||
graphics_cache.emplace(key, std::move(pipeline));
|
||||
++state.built;
|
||||
if (state.has_loaded) {
|
||||
|
@ -36,7 +36,7 @@ VkAttachmentDescription AttachmentDescription(const Device& device, PixelFormat
|
||||
RenderPassCache::RenderPassCache(const Device& device_) : device{&device_} {}
|
||||
|
||||
VkRenderPass RenderPassCache::Get(const RenderPassKey& key) {
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
const auto [pair, is_new] = cache.try_emplace(key);
|
||||
if (!is_new) {
|
||||
return *pair->second;
|
||||
|
@ -73,7 +73,7 @@ void VKScheduler::DispatchWork() {
|
||||
return;
|
||||
}
|
||||
{
|
||||
std::lock_guard lock{work_mutex};
|
||||
std::scoped_lock lock{work_mutex};
|
||||
work_queue.push(std::move(chunk));
|
||||
}
|
||||
work_cv.notify_one();
|
||||
@ -157,7 +157,7 @@ void VKScheduler::WorkerThread(std::stop_token stop_token) {
|
||||
if (has_submit) {
|
||||
AllocateWorkerCommandBuffer();
|
||||
}
|
||||
std::lock_guard reserve_lock{reserve_mutex};
|
||||
std::scoped_lock reserve_lock{reserve_mutex};
|
||||
chunk_reserve.push_back(std::move(work));
|
||||
} while (!stop_token.stop_requested());
|
||||
}
|
||||
@ -282,7 +282,7 @@ void VKScheduler::EndRenderPass() {
|
||||
}
|
||||
|
||||
void VKScheduler::AcquireNewChunk() {
|
||||
std::lock_guard lock{reserve_mutex};
|
||||
std::scoped_lock lock{reserve_mutex};
|
||||
if (chunk_reserve.empty()) {
|
||||
chunk = std::make_unique<CommandChunk>();
|
||||
return;
|
||||
|
@ -25,7 +25,7 @@ void ShaderCache::InvalidateRegion(VAddr addr, size_t size) {
|
||||
}
|
||||
|
||||
void ShaderCache::OnCPUWrite(VAddr addr, size_t size) {
|
||||
std::lock_guard lock{invalidation_mutex};
|
||||
std::scoped_lock lock{invalidation_mutex};
|
||||
InvalidatePagesInRegion(addr, size);
|
||||
}
|
||||
|
||||
|
@ -32,7 +32,7 @@ constexpr std::size_t TIMEOUT_SECONDS = 30;
|
||||
struct Client::Impl {
|
||||
Impl(std::string host, std::string username, std::string token)
|
||||
: host{std::move(host)}, username{std::move(username)}, token{std::move(token)} {
|
||||
std::lock_guard lock{jwt_cache.mutex};
|
||||
std::scoped_lock lock{jwt_cache.mutex};
|
||||
if (this->username == jwt_cache.username && this->token == jwt_cache.token) {
|
||||
jwt = jwt_cache.jwt;
|
||||
}
|
||||
@ -147,7 +147,7 @@ struct Client::Impl {
|
||||
if (result.result_code != WebResult::Code::Success) {
|
||||
LOG_ERROR(WebService, "UpdateJWT failed");
|
||||
} else {
|
||||
std::lock_guard lock{jwt_cache.mutex};
|
||||
std::scoped_lock lock{jwt_cache.mutex};
|
||||
jwt_cache.username = username;
|
||||
jwt_cache.token = token;
|
||||
jwt_cache.jwt = jwt = result.returned_data;
|
||||
|
@ -39,7 +39,7 @@ void ControllerNavigation::TriggerButton(Settings::NativeButton::Values native_b
|
||||
}
|
||||
|
||||
void ControllerNavigation::ControllerUpdateEvent(Core::HID::ControllerTriggerType type) {
|
||||
std::lock_guard lock{mutex};
|
||||
std::scoped_lock lock{mutex};
|
||||
if (!Settings::values.controller_navigation) {
|
||||
return;
|
||||
}
|
||||
|