early-access version 2665
@@ -148,29 +148,33 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const {

// LayeredExeFS
const auto load_dir = fs_controller.GetModificationLoadRoot(title_id);
const auto sdmc_load_dir = fs_controller.GetSDMCModificationLoadRoot(title_id);

std::vector<VirtualDir> patch_dirs = {sdmc_load_dir};
if (load_dir != nullptr && load_dir->GetSize() > 0) {
auto patch_dirs = load_dir->GetSubdirectories();
std::sort(
patch_dirs.begin(), patch_dirs.end(),
[](const VirtualDir& l, const VirtualDir& r) { return l->GetName() < r->GetName(); });
const auto load_patch_dirs = load_dir->GetSubdirectories();
patch_dirs.insert(patch_dirs.end(), load_patch_dirs.begin(), load_patch_dirs.end());
}

std::vector<VirtualDir> layers;
layers.reserve(patch_dirs.size() + 1);
for (const auto& subdir : patch_dirs) {
if (std::find(disabled.begin(), disabled.end(), subdir->GetName()) != disabled.end())
continue;
std::sort(patch_dirs.begin(), patch_dirs.end(),
[](const VirtualDir& l, const VirtualDir& r) { return l->GetName() < r->GetName(); });

auto exefs_dir = FindSubdirectoryCaseless(subdir, "exefs");
if (exefs_dir != nullptr)
layers.push_back(std::move(exefs_dir));
}
layers.push_back(exefs);
std::vector<VirtualDir> layers;
layers.reserve(patch_dirs.size() + 1);
for (const auto& subdir : patch_dirs) {
if (std::find(disabled.begin(), disabled.end(), subdir->GetName()) != disabled.end())
continue;

auto layered = LayeredVfsDirectory::MakeLayeredDirectory(std::move(layers));
if (layered != nullptr) {
LOG_INFO(Loader, " ExeFS: LayeredExeFS patches applied successfully");
exefs = std::move(layered);
}
auto exefs_dir = FindSubdirectoryCaseless(subdir, "exefs");
if (exefs_dir != nullptr)
layers.push_back(std::move(exefs_dir));
}
layers.push_back(exefs);

auto layered = LayeredVfsDirectory::MakeLayeredDirectory(std::move(layers));
if (layered != nullptr) {
LOG_INFO(Loader, " ExeFS: LayeredExeFS patches applied successfully");
exefs = std::move(layered);
}

if (Settings::values.dump_exefs) {
@@ -536,11 +540,20 @@ PatchManager::PatchVersionNames PatchManager::GetPatchVersionNames(VirtualFile u

// SDMC mod directory (RomFS LayeredFS)
const auto sdmc_mod_dir = fs_controller.GetSDMCModificationLoadRoot(title_id);
if (sdmc_mod_dir != nullptr && sdmc_mod_dir->GetSize() > 0 &&
IsDirValidAndNonEmpty(FindSubdirectoryCaseless(sdmc_mod_dir, "romfs"))) {
const auto mod_disabled =
std::find(disabled.begin(), disabled.end(), "SDMC") != disabled.end();
out.insert_or_assign(mod_disabled ? "[D] SDMC" : "SDMC", "LayeredFS");
if (sdmc_mod_dir != nullptr && sdmc_mod_dir->GetSize() > 0) {
std::string types;
if (IsDirValidAndNonEmpty(FindSubdirectoryCaseless(sdmc_mod_dir, "exefs"))) {
AppendCommaIfNotEmpty(types, "LayeredExeFS");
}
if (IsDirValidAndNonEmpty(FindSubdirectoryCaseless(sdmc_mod_dir, "romfs"))) {
AppendCommaIfNotEmpty(types, "LayeredFS");
}

if (!types.empty()) {
const auto mod_disabled =
std::find(disabled.begin(), disabled.end(), "SDMC") != disabled.end();
out.insert_or_assign(mod_disabled ? "[D] SDMC" : "SDMC", types);
}
}

// DLC

@@ -346,7 +346,8 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::
return ResultSuccess;
}

ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
bool invalidate_entire_icache) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
@@ -396,7 +397,11 @@ ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std
bool reprotected_pages = false;
SCOPE_EXIT({
if (reprotected_pages && any_code_pages) {
system.InvalidateCpuInstructionCacheRange(dst_address, size);
if (invalidate_entire_icache) {
system.InvalidateCpuInstructionCaches();
} else {
system.InvalidateCpuInstructionCacheRange(dst_address, size);
}
}
});

@@ -563,6 +568,8 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
KMemoryAttribute::None);

system.InvalidateCpuInstructionCaches();

return ResultSuccess;
}

@@ -38,7 +38,8 @@ public:
ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
KMemoryPermission perm);
ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
bool invalidate_entire_icache);
ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
VAddr src_addr);
ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);

@@ -99,7 +99,7 @@ struct KernelCore::Impl {
// Close all open server sessions and ports.
std::unordered_set<KAutoObject*> server_objects_;
{
std::lock_guard lk(server_objects_lock);
std::scoped_lock lk(server_objects_lock);
server_objects_ = server_objects;
server_objects.clear();
}
@@ -158,7 +158,7 @@ struct KernelCore::Impl {

// Close kernel objects that were not freed on shutdown
{
std::lock_guard lk(registered_in_use_objects_lock);
std::scoped_lock lk{registered_in_use_objects_lock};
if (registered_in_use_objects.size()) {
for (auto& object : registered_in_use_objects) {
object->Close();
@@ -179,10 +179,10 @@ struct KernelCore::Impl {

// Track kernel objects that were not freed on shutdown
{
std::lock_guard lk(registered_objects_lock);
std::scoped_lock lk{registered_objects_lock};
if (registered_objects.size()) {
LOG_CRITICAL(Kernel, "{} kernel objects were dangling on shutdown!",
registered_objects.size());
LOG_DEBUG(Kernel, "{} kernel objects were dangling on shutdown!",
registered_objects.size());
registered_objects.clear();
}
}
@@ -673,12 +673,12 @@ struct KernelCore::Impl {
}

void RegisterServerObject(KAutoObject* server_object) {
std::lock_guard lk(server_objects_lock);
std::scoped_lock lk(server_objects_lock);
server_objects.insert(server_object);
}

void UnregisterServerObject(KAutoObject* server_object) {
std::lock_guard lk(server_objects_lock);
std::scoped_lock lk(server_objects_lock);
server_objects.erase(server_object);
}

@@ -954,22 +954,22 @@ void KernelCore::UnregisterServerObject(KAutoObject* server_object) {
}

void KernelCore::RegisterKernelObject(KAutoObject* object) {
std::lock_guard lk(impl->registered_objects_lock);
std::scoped_lock lk{impl->registered_objects_lock};
impl->registered_objects.insert(object);
}

void KernelCore::UnregisterKernelObject(KAutoObject* object) {
std::lock_guard lk(impl->registered_objects_lock);
std::scoped_lock lk{impl->registered_objects_lock};
impl->registered_objects.erase(object);
}

void KernelCore::RegisterInUseObject(KAutoObject* object) {
std::lock_guard lk(impl->registered_in_use_objects_lock);
std::scoped_lock lk{impl->registered_in_use_objects_lock};
impl->registered_in_use_objects.insert(object);
}

void KernelCore::UnregisterInUseObject(KAutoObject* object) {
std::lock_guard lk(impl->registered_in_use_objects_lock);
std::scoped_lock lk{impl->registered_in_use_objects_lock};
impl->registered_in_use_objects.erase(object);
}

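Note: the std::lock_guard to std::scoped_lock conversions above, and the matching ones in the files further down, all follow one idiom. A minimal sketch of the difference, with hypothetical mutex names that are not taken from this commit:

#include <mutex>

std::mutex first_lock;
std::mutex second_lock;

void Example() {
    // For a single mutex, std::scoped_lock behaves like std::lock_guard.
    // Unlike std::lock_guard it can also take several mutexes at once and
    // locks them with a deadlock-avoidance algorithm.
    std::scoped_lock lk{first_lock, second_lock};
}
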
@@ -1713,7 +1713,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
return ResultInvalidMemoryRegion;
}

return page_table.UnmapCodeMemory(dst_address, src_address, size);
return page_table.UnmapCodeMemory(dst_address, src_address, size, true);
}

/// Exits the current process

@@ -24,7 +24,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
}

void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
std::lock_guard lock{mutex};
std::scoped_lock lock{mutex};
if (nanoseconds > 0) {
ASSERT(thread);
ASSERT(thread->GetState() != ThreadState::Runnable);
@@ -35,7 +35,7 @@ void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
}

void TimeManager::UnscheduleTimeEvent(KThread* thread) {
std::lock_guard lock{mutex};
std::scoped_lock lock{mutex};
system.CoreTiming().UnscheduleEvent(time_manager_event_type,
reinterpret_cast<uintptr_t>(thread));
}

@@ -318,7 +318,7 @@ void Controller_NPad::OnRelease() {
}

void Controller_NPad::RequestPadStateUpdate(Core::HID::NpadIdType npad_id) {
std::lock_guard lock{mutex};
std::scoped_lock lock{mutex};
auto& controller = GetControllerFromNpadIdType(npad_id);
const auto controller_type = controller.device->GetNpadStyleIndex();
if (!controller.device->IsConnected()) {

@@ -389,8 +389,8 @@ public:

if (bss_size) {
auto block_guard = detail::ScopeExit([&] {
page_table.UnmapCodeMemory(addr + nro_size, bss_addr, bss_size);
page_table.UnmapCodeMemory(addr, nro_addr, nro_size);
page_table.UnmapCodeMemory(addr + nro_size, bss_addr, bss_size, false);
page_table.UnmapCodeMemory(addr, nro_addr, nro_size, false);
});

const ResultCode result{
@@ -572,15 +572,17 @@ public:
if (info.bss_size != 0) {
CASCADE_CODE(page_table.UnmapCodeMemory(info.nro_address + info.text_size +
info.ro_size + info.data_size,
info.bss_address, info.bss_size));
info.bss_address, info.bss_size, false));
}

CASCADE_CODE(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size,
info.src_addr + info.text_size + info.ro_size,
info.data_size));
info.data_size, false));
CASCADE_CODE(page_table.UnmapCodeMemory(info.nro_address + info.text_size,
info.src_addr + info.text_size, info.ro_size));
CASCADE_CODE(page_table.UnmapCodeMemory(info.nro_address, info.src_addr, info.text_size));
info.src_addr + info.text_size, info.ro_size,
false));
CASCADE_CODE(
page_table.UnmapCodeMemory(info.nro_address, info.src_addr, info.text_size, false));
return ResultSuccess;
}

@@ -21,7 +21,7 @@ Status BufferItemConsumer::AcquireBuffer(BufferItem* item, std::chrono::nanoseco
return Status::BadValue;
}

std::scoped_lock lock(mutex);
std::scoped_lock lock{mutex};

if (const auto status = AcquireBufferLocked(item, present_when); status != Status::NoError) {
if (status != Status::NoBufferAvailable) {
@@ -40,7 +40,7 @@ Status BufferItemConsumer::AcquireBuffer(BufferItem* item, std::chrono::nanoseco
}

Status BufferItemConsumer::ReleaseBuffer(const BufferItem& item, Fence& release_fence) {
std::scoped_lock lock(mutex);
std::scoped_lock lock{mutex};

if (const auto status = AddReleaseFenceLocked(item.buf, item.graphic_buffer, release_fence);
status != Status::NoError) {

@@ -19,7 +19,7 @@ BufferQueueConsumer::~BufferQueueConsumer() = default;

Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer,
std::chrono::nanoseconds expected_present) {
std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};

// Check that the consumer doesn't currently have the maximum number of buffers acquired.
const s32 num_acquired_buffers{
@@ -120,7 +120,7 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc

std::shared_ptr<IProducerListener> listener;
{
std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};

// If the frame number has changed because the buffer has been reallocated, we can ignore
// this ReleaseBuffer for the old buffer.
@@ -180,7 +180,7 @@ Status BufferQueueConsumer::Connect(std::shared_ptr<IConsumerListener> consumer_

LOG_DEBUG(Service_NVFlinger, "controlled_by_app={}", controlled_by_app);

std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};

if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -199,7 +199,7 @@ Status BufferQueueConsumer::GetReleasedBuffers(u64* out_slot_mask) {
return Status::BadValue;
}

std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};

if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");

@@ -15,7 +15,7 @@ BufferQueueCore::BufferQueueCore() = default;
BufferQueueCore::~BufferQueueCore() = default;

void BufferQueueCore::NotifyShutdown() {
std::scoped_lock lock(mutex);
std::scoped_lock lock{mutex};

is_shutting_down = true;

@@ -38,7 +38,7 @@ BufferQueueProducer::~BufferQueueProducer() {
Status BufferQueueProducer::RequestBuffer(s32 slot, std::shared_ptr<GraphicBuffer>* buf) {
LOG_DEBUG(Service_NVFlinger, "slot {}", slot);

std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};

if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -65,7 +65,7 @@ Status BufferQueueProducer::SetBufferCount(s32 buffer_count) {

std::shared_ptr<IConsumerListener> listener;
{
std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};
core->WaitWhileAllocatingLocked();

if (core->is_abandoned) {
@@ -236,7 +236,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
Status return_flags = Status::NoError;
bool attached_by_consumer = false;
{
std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};
core->WaitWhileAllocatingLocked();

if (format == PixelFormat::NoFormat) {
@@ -295,7 +295,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
}

{
std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};

if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -320,7 +320,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
Status BufferQueueProducer::DetachBuffer(s32 slot) {
LOG_DEBUG(Service_NVFlinger, "slot {}", slot);

std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};

if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -356,7 +356,7 @@ Status BufferQueueProducer::DetachNextBuffer(std::shared_ptr<GraphicBuffer>* out
return Status::BadValue;
}

std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};
core->WaitWhileAllocatingLocked();

if (core->is_abandoned) {
@@ -399,7 +399,7 @@ Status BufferQueueProducer::AttachBuffer(s32* out_slot,
return Status::BadValue;
}

std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};
core->WaitWhileAllocatingLocked();

Status return_flags = Status::NoError;
@@ -460,7 +460,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
BufferItem item;

{
std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};

if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -576,7 +576,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
// Call back without the main BufferQueue lock held, but with the callback lock held so we can
// ensure that callbacks occur in order
{
std::scoped_lock lock(callback_mutex);
std::scoped_lock lock{callback_mutex};
while (callback_ticket != current_callback_ticket) {
callback_condition.wait(callback_mutex);
}
@@ -597,7 +597,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
LOG_DEBUG(Service_NVFlinger, "slot {}", slot);

std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};

if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -623,7 +623,7 @@ void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
}

Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};

if (out_value == nullptr) {
LOG_ERROR(Service_NVFlinger, "outValue was nullptr");
@@ -673,7 +673,7 @@ Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
Status BufferQueueProducer::Connect(const std::shared_ptr<IProducerListener>& listener,
NativeWindowApi api, bool producer_controlled_by_app,
QueueBufferOutput* output) {
std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};

LOG_DEBUG(Service_NVFlinger, "api = {} producer_controlled_by_app = {}", api,
producer_controlled_by_app);
@@ -730,7 +730,7 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) {
std::shared_ptr<IConsumerListener> listener;

{
std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};

core->WaitWhileAllocatingLocked();

@@ -780,7 +780,7 @@ Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot,
return Status::BadValue;
}

std::scoped_lock lock(core->mutex);
std::scoped_lock lock{core->mutex};

slots[slot] = {};
slots[slot].graphic_buffer = buffer;

@@ -18,7 +18,7 @@ ConsumerBase::ConsumerBase(std::unique_ptr<BufferQueueConsumer> consumer_)
: consumer{std::move(consumer_)} {}

ConsumerBase::~ConsumerBase() {
std::scoped_lock lock(mutex);
std::scoped_lock lock{mutex};

ASSERT_MSG(is_abandoned, "consumer is not abandoned!");
}
@@ -44,7 +44,7 @@ void ConsumerBase::OnFrameReplaced(const BufferItem& item) {
}

void ConsumerBase::OnBuffersReleased() {
std::scoped_lock lock(mutex);
std::scoped_lock lock{mutex};

LOG_DEBUG(Service_NVFlinger, "called");

@@ -14,7 +14,7 @@ HosBinderDriverServer::HosBinderDriverServer(Core::System& system_)
HosBinderDriverServer::~HosBinderDriverServer() {}

u64 HosBinderDriverServer::RegisterProducer(std::unique_ptr<android::IBinder>&& binder) {
std::lock_guard lk{lock};
std::scoped_lock lk{lock};

last_id++;

@@ -24,7 +24,7 @@ u64 HosBinderDriverServer::RegisterProducer(std::unique_ptr<android::IBinder>&&
}

android::IBinder* HosBinderDriverServer::TryGetProducer(u64 id) {
std::lock_guard lk{lock};
std::scoped_lock lk{lock};

if (auto search = producers.find(id); search != producers.end()) {
return search->second.get();

@@ -689,6 +689,9 @@ Errno BSD::SetSockOptImpl(s32 fd, u32 level, OptName optname, size_t optlen, con
case OptName::REUSEADDR:
ASSERT(value == 0 || value == 1);
return Translate(socket->SetReuseAddr(value != 0));
case OptName::KEEPALIVE:
ASSERT(value == 0 || value == 1);
return Translate(socket->SetKeepAlive(value != 0));
case OptName::BROADCAST:
ASSERT(value == 0 || value == 1);
return Translate(socket->SetBroadcast(value != 0));

@@ -2,8 +2,24 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <string_view>
#include <utility>
#include <vector>

#include "common/string_util.h"
#include "common/swap.h"
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/service/sockets/sfdnsres.h"
#include "core/memory.h"

#ifdef _WIN32
#include <ws2tcpip.h>
#elif YUZU_UNIX
#include <arpa/inet.h>
#include <netdb.h>
#include <sys/socket.h>
#endif

namespace Service::Sockets {
@@ -21,7 +37,7 @@ SFDNSRES::SFDNSRES(Core::System& system_) : ServiceFramework{system_, "sfdnsres"
{9, nullptr, "CancelRequest"},
{10, nullptr, "GetHostByNameRequestWithOptions"},
{11, nullptr, "GetHostByAddrRequestWithOptions"},
{12, nullptr, "GetAddrInfoRequestWithOptions"},
{12, &SFDNSRES::GetAddrInfoRequestWithOptions, "GetAddrInfoRequestWithOptions"},
{13, nullptr, "GetNameInfoRequestWithOptions"},
{14, nullptr, "ResolverSetOptionRequest"},
{15, nullptr, "ResolverGetOptionRequest"},
@@ -31,7 +47,142 @@ SFDNSRES::SFDNSRES(Core::System& system_) : ServiceFramework{system_, "sfdnsres"

SFDNSRES::~SFDNSRES() = default;

void SFDNSRES::GetAddrInfoRequest(Kernel::HLERequestContext& ctx) {
enum class NetDbError : s32 {
Internal = -1,
Success = 0,
HostNotFound = 1,
TryAgain = 2,
NoRecovery = 3,
NoData = 4,
};

static NetDbError AddrInfoErrorToNetDbError(s32 result) {
// Best effort guess to map errors
switch (result) {
case 0:
return NetDbError::Success;
case EAI_AGAIN:
return NetDbError::TryAgain;
case EAI_NODATA:
return NetDbError::NoData;
default:
return NetDbError::HostNotFound;
}
}

static std::vector<u8> SerializeAddrInfo(const addrinfo* addrinfo, s32 result_code,
std::string_view host) {
// Adapted from
// https://github.com/switchbrew/libnx/blob/c5a9a909a91657a9818a3b7e18c9b91ff0cbb6e3/nx/source/runtime/resolver.c#L190
std::vector<u8> data;

auto* current = addrinfo;
while (current != nullptr) {
struct SerializedResponseHeader {
u32 magic;
s32 flags;
s32 family;
s32 socket_type;
s32 protocol;
u32 address_length;
};
static_assert(sizeof(SerializedResponseHeader) == 0x18,
"Response header size must be 0x18 bytes");

constexpr auto header_size = sizeof(SerializedResponseHeader);
const auto addr_size =
current->ai_addr && current->ai_addrlen > 0 ? current->ai_addrlen : 4;
const auto canonname_size = current->ai_canonname ? strlen(current->ai_canonname) + 1 : 1;

const auto last_size = data.size();
data.resize(last_size + header_size + addr_size + canonname_size);

// Header in network byte order
SerializedResponseHeader header{};

constexpr auto HEADER_MAGIC = 0xBEEFCAFE;
header.magic = htonl(HEADER_MAGIC);
header.family = htonl(current->ai_family);
header.flags = htonl(current->ai_flags);
header.socket_type = htonl(current->ai_socktype);
header.protocol = htonl(current->ai_protocol);
header.address_length = current->ai_addr ? htonl((u32)current->ai_addrlen) : 0;

auto* header_ptr = data.data() + last_size;
std::memcpy(header_ptr, &header, header_size);

if (header.address_length == 0) {
std::memset(header_ptr + header_size, 0, 4);
} else {
switch (current->ai_family) {
case AF_INET: {
struct SockAddrIn {
s16 sin_family;
u16 sin_port;
u32 sin_addr;
u8 sin_zero[8];
};

SockAddrIn serialized_addr{};
const auto addr = *reinterpret_cast<sockaddr_in*>(current->ai_addr);
serialized_addr.sin_port = htons(addr.sin_port);
serialized_addr.sin_family = htons(addr.sin_family);
serialized_addr.sin_addr = htonl(addr.sin_addr.s_addr);
std::memcpy(header_ptr + header_size, &serialized_addr, sizeof(SockAddrIn));

char addr_string_buf[64]{};
inet_ntop(AF_INET, &addr.sin_addr, addr_string_buf, std::size(addr_string_buf));
LOG_INFO(Service, "Resolved host '{}' to IPv4 address {}", host, addr_string_buf);
break;
}
case AF_INET6: {
struct SockAddrIn6 {
s16 sin6_family;
u16 sin6_port;
u32 sin6_flowinfo;
u8 sin6_addr[16];
u32 sin6_scope_id;
};

SockAddrIn6 serialized_addr{};
const auto addr = *reinterpret_cast<sockaddr_in6*>(current->ai_addr);
serialized_addr.sin6_family = htons(addr.sin6_family);
serialized_addr.sin6_port = htons(addr.sin6_port);
serialized_addr.sin6_flowinfo = htonl(addr.sin6_flowinfo);
serialized_addr.sin6_scope_id = htonl(addr.sin6_scope_id);
std::memcpy(serialized_addr.sin6_addr, &addr.sin6_addr,
sizeof(SockAddrIn6::sin6_addr));
std::memcpy(header_ptr + header_size, &serialized_addr, sizeof(SockAddrIn6));

char addr_string_buf[64]{};
inet_ntop(AF_INET6, &addr.sin6_addr, addr_string_buf, std::size(addr_string_buf));
LOG_INFO(Service, "Resolved host '{}' to IPv6 address {}", host, addr_string_buf);
break;
}
default:
std::memcpy(header_ptr + header_size, current->ai_addr, addr_size);
break;
}
}
if (current->ai_canonname) {
std::memcpy(header_ptr + addr_size, current->ai_canonname, canonname_size);
} else {
*(header_ptr + header_size + addr_size) = 0;
}

current = current->ai_next;
}

// 4-byte sentinel value
data.push_back(0);
data.push_back(0);
data.push_back(0);
data.push_back(0);

return data;
}

static std::pair<u32, s32> GetAddrInfoRequestImpl(Kernel::HLERequestContext& ctx) {
struct Parameters {
u8 use_nsd_resolve;
u32 unknown;
@@ -42,11 +193,51 @@ void SFDNSRES::GetAddrInfoRequest(Kernel::HLERequestContext& ctx) {
const auto parameters = rp.PopRaw<Parameters>();

LOG_WARNING(Service,
"(STUBBED) called. use_nsd_resolve={}, unknown=0x{:08X}, process_id=0x{:016X}",
"called with ignored parameters: use_nsd_resolve={}, unknown={}, process_id={}",
parameters.use_nsd_resolve, parameters.unknown, parameters.process_id);

IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
const auto host_buffer = ctx.ReadBuffer(0);
const std::string host = Common::StringFromBuffer(host_buffer);

const auto service_buffer = ctx.ReadBuffer(1);
const std::string service = Common::StringFromBuffer(service_buffer);

addrinfo* addrinfo;
// Pass null for hints. Serialized hints are also passed in a buffer, but are ignored for now
s32 result_code = getaddrinfo(host.c_str(), service.c_str(), nullptr, &addrinfo);

u32 data_size = 0;
if (result_code == 0 && addrinfo != nullptr) {
const std::vector<u8>& data = SerializeAddrInfo(addrinfo, result_code, host);
data_size = static_cast<u32>(data.size());
freeaddrinfo(addrinfo);

ctx.WriteBuffer(data, 0);
}

return std::make_pair(data_size, result_code);
}

} // namespace Service::Sockets
void SFDNSRES::GetAddrInfoRequest(Kernel::HLERequestContext& ctx) {
auto [data_size, result_code] = GetAddrInfoRequestImpl(ctx);

IPC::ResponseBuilder rb{ctx, 4};
rb.Push(ResultSuccess);
rb.Push(static_cast<s32>(AddrInfoErrorToNetDbError(result_code))); // NetDBErrorCode
rb.Push(result_code); // errno
rb.Push(data_size); // serialized size
}

void SFDNSRES::GetAddrInfoRequestWithOptions(Kernel::HLERequestContext& ctx) {
// Additional options are ignored
auto [data_size, result_code] = GetAddrInfoRequestImpl(ctx);

IPC::ResponseBuilder rb{ctx, 5};
rb.Push(ResultSuccess);
rb.Push(data_size); // serialized size
rb.Push(result_code); // errno
rb.Push(static_cast<s32>(AddrInfoErrorToNetDbError(result_code))); // NetDBErrorCode
rb.Push(0);
}

} // namespace Service::Sockets

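The serialized addrinfo stream built above packs, per entry, a 0x18-byte header (magic 0xBEEFCAFE, all fields in network byte order), the raw address bytes (or four zero bytes when there is no address), and a NUL-terminated canonical name, and the whole stream ends with a 4-byte zero sentinel. A minimal reader sketch for that layout follows; it is not part of the commit, the function name is illustrative, and POSIX headers are assumed for ntohl:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <arpa/inet.h> // ntohl

// Returns the byte offset of the next entry, or 0 once the 4-byte zero
// sentinel (or malformed data) is reached.
std::size_t ReadOneEntry(const std::uint8_t* buffer, std::size_t offset) {
    std::uint32_t magic{};
    std::memcpy(&magic, buffer + offset, sizeof(magic));
    if (ntohl(magic) != 0xBEEFCAFE) {
        return 0;
    }
    // address_length is the last u32 of the 0x18-byte header (offset 0x14).
    std::uint32_t address_length{};
    std::memcpy(&address_length, buffer + offset + 0x14, sizeof(address_length));
    address_length = ntohl(address_length);
    const std::size_t addr_size = address_length != 0 ? address_length : 4;
    // The canonical name is NUL-terminated and at least one byte long.
    const std::size_t name_size =
        std::strlen(reinterpret_cast<const char*>(buffer + offset + 0x18 + addr_size)) + 1;
    return offset + 0x18 + addr_size + name_size;
}
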
@@ -19,6 +19,7 @@ public:

private:
void GetAddrInfoRequest(Kernel::HLERequestContext& ctx);
void GetAddrInfoRequestWithOptions(Kernel::HLERequestContext& ctx);
};

} // namespace Service::Sockets

@@ -46,6 +46,7 @@ enum class Protocol : u32 {

enum class OptName : u32 {
REUSEADDR = 0x4,
KEEPALIVE = 0x8,
BROADCAST = 0x20,
LINGER = 0x80,
SNDBUF = 0x1001,

@@ -600,6 +600,10 @@ Errno Socket::SetReuseAddr(bool enable) {
return SetSockOpt<u32>(fd, SO_REUSEADDR, enable ? 1 : 0);
}

Errno Socket::SetKeepAlive(bool enable) {
return SetSockOpt<u32>(fd, SO_KEEPALIVE, enable ? 1 : 0);
}

Errno Socket::SetBroadcast(bool enable) {
return SetSockOpt<u32>(fd, SO_BROADCAST, enable ? 1 : 0);
}

@@ -67,6 +67,8 @@ public:

Errno SetReuseAddr(bool enable);

Errno SetKeepAlive(bool enable);

Errno SetBroadcast(bool enable);

Errno SetSndBuf(u32 value);

@@ -53,13 +53,13 @@ PerfStats::~PerfStats() {
}

void PerfStats::BeginSystemFrame() {
std::lock_guard lock{object_mutex};
std::scoped_lock lock{object_mutex};

frame_begin = Clock::now();
}

void PerfStats::EndSystemFrame() {
std::lock_guard lock{object_mutex};
std::scoped_lock lock{object_mutex};

auto frame_end = Clock::now();
const auto frame_time = frame_end - frame_begin;
@@ -79,7 +79,7 @@ void PerfStats::EndGameFrame() {
}

double PerfStats::GetMeanFrametime() const {
std::lock_guard lock{object_mutex};
std::scoped_lock lock{object_mutex};

if (current_index <= IgnoreFrames) {
return 0;
@@ -91,7 +91,7 @@ double PerfStats::GetMeanFrametime() const {
}

PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us) {
std::lock_guard lock{object_mutex};
std::scoped_lock lock{object_mutex};

const auto now = Clock::now();
// Walltime elapsed since stats were reset
@@ -120,7 +120,7 @@ PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us
}

double PerfStats::GetLastFrameTimeScale() const {
std::lock_guard lock{object_mutex};
std::scoped_lock lock{object_mutex};

constexpr double FRAME_LENGTH = 1.0 / 60;
return duration_cast<DoubleSecs>(previous_frame_length).count() / FRAME_LENGTH;

@@ -80,7 +80,7 @@ bool Freezer::IsActive() const {
}

void Freezer::Clear() {
std::lock_guard lock{entries_mutex};
std::scoped_lock lock{entries_mutex};

LOG_DEBUG(Common_Memory, "Clearing all frozen memory values.");

@@ -88,7 +88,7 @@ void Freezer::Clear() {
}

u64 Freezer::Freeze(VAddr address, u32 width) {
std::lock_guard lock{entries_mutex};
std::scoped_lock lock{entries_mutex};

const auto current_value = MemoryReadWidth(memory, width, address);
entries.push_back({address, width, current_value});
@@ -101,7 +101,7 @@ u64 Freezer::Freeze(VAddr address, u32 width) {
}

void Freezer::Unfreeze(VAddr address) {
std::lock_guard lock{entries_mutex};
std::scoped_lock lock{entries_mutex};

LOG_DEBUG(Common_Memory, "Unfreezing memory for address={:016X}", address);

@@ -109,13 +109,13 @@ void Freezer::Unfreeze(VAddr address) {
}

bool Freezer::IsFrozen(VAddr address) const {
std::lock_guard lock{entries_mutex};
std::scoped_lock lock{entries_mutex};

return FindEntry(address) != entries.cend();
}

void Freezer::SetFrozenValue(VAddr address, u64 value) {
std::lock_guard lock{entries_mutex};
std::scoped_lock lock{entries_mutex};

const auto iter = FindEntry(address);

@@ -132,7 +132,7 @@ void Freezer::SetFrozenValue(VAddr address, u64 value) {
}

std::optional<Freezer::Entry> Freezer::GetEntry(VAddr address) const {
std::lock_guard lock{entries_mutex};
std::scoped_lock lock{entries_mutex};

const auto iter = FindEntry(address);

@@ -144,7 +144,7 @@ std::optional<Freezer::Entry> Freezer::GetEntry(VAddr address) const {
}

std::vector<Freezer::Entry> Freezer::GetEntries() const {
std::lock_guard lock{entries_mutex};
std::scoped_lock lock{entries_mutex};

return entries;
}
@@ -165,7 +165,7 @@ void Freezer::FrameCallback(std::uintptr_t, std::chrono::nanoseconds ns_late) {
return;
}

std::lock_guard lock{entries_mutex};
std::scoped_lock lock{entries_mutex};

for (const auto& entry : entries) {
LOG_DEBUG(Common_Memory,
@@ -178,7 +178,7 @@ void Freezer::FrameCallback(std::uintptr_t, std::chrono::nanoseconds ns_late) {
}

void Freezer::FillEntryReads() {
std::lock_guard lock{entries_mutex};
std::scoped_lock lock{entries_mutex};

LOG_DEBUG(Common_Memory, "Updating memory freeze entries to current values.");