early-access version 1420
parent 7381de3435, commit 84e1d5b26b
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 1419.
+This is the source code for early-access 1420.
 
 ## Legal Notice
 
@@ -19,15 +19,14 @@ namespace Common {
 /// SPSC ring buffer
 /// @tparam T Element type
 /// @tparam capacity Number of slots in ring buffer
-/// @tparam granularity Slot size in terms of number of elements
-template <typename T, std::size_t capacity, std::size_t granularity = 1>
+template <typename T, std::size_t capacity>
 class RingBuffer {
-/// A "slot" is made of `granularity` elements of `T`.
-static constexpr std::size_t slot_size = granularity * sizeof(T);
+/// A "slot" is made of a single `T`.
+static constexpr std::size_t slot_size = sizeof(T);
 // T must be safely memcpy-able and have a trivial default constructor.
 static_assert(std::is_trivial_v<T>);
 // Ensure capacity is sensible.
-static_assert(capacity < std::numeric_limits<std::size_t>::max() / 2 / granularity);
+static_assert(capacity < std::numeric_limits<std::size_t>::max() / 2);
 static_assert((capacity & (capacity - 1)) == 0, "capacity must be a power of two");
 // Ensure lock-free.
 static_assert(std::atomic_size_t::is_always_lock_free);
@@ -47,7 +46,7 @@ public:
 const std::size_t second_copy = push_count - first_copy;
 
 const char* in = static_cast<const char*>(new_slots);
-std::memcpy(m_data.data() + pos * granularity, in, first_copy * slot_size);
+std::memcpy(m_data.data() + pos, in, first_copy * slot_size);
 in += first_copy * slot_size;
 std::memcpy(m_data.data(), in, second_copy * slot_size);
 
@@ -74,7 +73,7 @@ public:
 const std::size_t second_copy = pop_count - first_copy;
 
 char* out = static_cast<char*>(output);
-std::memcpy(out, m_data.data() + pos * granularity, first_copy * slot_size);
+std::memcpy(out, m_data.data() + pos, first_copy * slot_size);
 out += first_copy * slot_size;
 std::memcpy(out, m_data.data(), second_copy * slot_size);
 
@@ -84,9 +83,9 @@ public:
 }
 
 std::vector<T> Pop(std::size_t max_slots = ~std::size_t(0)) {
-std::vector<T> out(std::min(max_slots, capacity) * granularity);
-const std::size_t count = Pop(out.data(), out.size() / granularity);
-out.resize(count * granularity);
+std::vector<T> out(std::min(max_slots, capacity));
+const std::size_t count = Pop(out.data(), out.size());
+out.resize(count);
 return out;
 }
 
@@ -113,7 +112,7 @@ private:
 alignas(128) std::atomic_size_t m_write_index{0};
 #endif
 
-std::array<T, granularity * capacity> m_data;
+std::array<T, capacity> m_data;
 };
 
 } // namespace Common
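Note on the hunks above (most likely src/common/ring_buffer.h): with the granularity parameter removed, one slot is exactly one element, so Push/Pop counts are plain element counts. A minimal usage sketch, not part of the commit, assuming it is compiled inside the yuzu tree with this header on the include path; the function name is illustrative only.

#include <array>
#include <cstddef>
#include <vector>

#include "common/ring_buffer.h"

void RingBufferSketch() {
    // 8 slots, each holding a single char (a power-of-two capacity is required).
    Common::RingBuffer<char, 8> buf;

    const std::array<char, 4> in{'a', 'b', 'c', 'd'};
    // Push takes an element count; there is no slots-vs-elements distinction left.
    const std::size_t pushed = buf.Push(in.data(), in.size());

    // Pop mirrors Push: ask for up to 4 elements back.
    const std::vector<char> out = buf.Pop(4);
    (void)pushed;
    (void)out;
}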
@@ -173,6 +173,7 @@ add_library(core STATIC
 hle/kernel/k_scheduler.h
 hle/kernel/k_scheduler_lock.h
 hle/kernel/k_scoped_lock.h
+hle/kernel/k_scoped_resource_reservation.h
 hle/kernel/k_scoped_scheduler_lock_and_sleep.h
 hle/kernel/k_synchronization_object.cpp
 hle/kernel/k_synchronization_object.h
@@ -118,9 +118,10 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
 
 // Check the userspace value.
 s32 user_value{};
-R_UNLESS(UpdateIfEqual(system, std::addressof(user_value), addr, value, value + 1),
-Svc::ResultInvalidCurrentMemory);
+if (!UpdateIfEqual(system, &user_value, addr, value, value + 1)) {
+LOG_ERROR(Kernel, "Invalid current memory!");
+return Svc::ResultInvalidCurrentMemory;
+}
 if (user_value != value) {
 return Svc::ResultInvalidState;
 }
@@ -146,61 +147,34 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
 // Perform signaling.
 s32 num_waiters{};
 {
-KScopedSchedulerLock sl(kernel);
+[[maybe_unused]] const KScopedSchedulerLock sl(kernel);
 
 auto it = thread_tree.nfind_light({addr, -1});
 // Determine the updated value.
 s32 new_value{};
-if (/*GetTargetFirmware() >= TargetFirmware_7_0_0*/ true) {
-if (count <= 0) {
-if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
-new_value = value - 2;
-} else {
-new_value = value + 1;
-}
+if (count <= 0) {
+if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
+new_value = value - 2;
 } else {
-if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
-auto tmp_it = it;
-s32 tmp_num_waiters{};
-while ((++tmp_it != thread_tree.end()) &&
-(tmp_it->GetAddressArbiterKey() == addr)) {
-if ((tmp_num_waiters++) >= count) {
-break;
-}
-}
-
-if (tmp_num_waiters < count) {
-new_value = value - 1;
-} else {
-new_value = value;
-}
-} else {
-new_value = value + 1;
-}
-}
+new_value = value + 1;
 }
 } else {
-if (count <= 0) {
-if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
-new_value = value - 1;
-} else {
-new_value = value + 1;
-}
-} else {
+if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
 auto tmp_it = it;
 s32 tmp_num_waiters{};
-while ((tmp_it != thread_tree.end()) && (tmp_it->GetAddressArbiterKey() == addr) &&
-(tmp_num_waiters < count + 1)) {
-++tmp_num_waiters;
-++tmp_it;
+while (++tmp_it != thread_tree.end() && tmp_it->GetAddressArbiterKey() == addr) {
+if (tmp_num_waiters++ >= count) {
+break;
+}
 }
 
-if (tmp_num_waiters == 0) {
-new_value = value + 1;
-} else if (tmp_num_waiters <= count) {
+if (tmp_num_waiters < count) {
 new_value = value - 1;
 } else {
 new_value = value;
 }
+} else {
+new_value = value + 1;
 }
 }
 
@@ -208,13 +182,15 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
 s32 user_value{};
 bool succeeded{};
 if (value != new_value) {
-succeeded = UpdateIfEqual(system, std::addressof(user_value), addr, value, new_value);
+succeeded = UpdateIfEqual(system, &user_value, addr, value, new_value);
 } else {
-succeeded = ReadFromUser(system, std::addressof(user_value), addr);
+succeeded = ReadFromUser(system, &user_value, addr);
 }
 
-R_UNLESS(succeeded, Svc::ResultInvalidCurrentMemory);
+if (!succeeded) {
+LOG_ERROR(Kernel, "Invalid current memory!");
+return Svc::ResultInvalidCurrentMemory;
+}
 if (user_value != value) {
 return Svc::ResultInvalidState;
 }
@@ -255,9 +231,9 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
 s32 user_value{};
 bool succeeded{};
 if (decrement) {
-succeeded = DecrementIfLessThan(system, std::addressof(user_value), addr, value);
+succeeded = DecrementIfLessThan(system, &user_value, addr, value);
 } else {
-succeeded = ReadFromUser(system, std::addressof(user_value), addr);
+succeeded = ReadFromUser(system, &user_value, addr);
 }
 
 if (!succeeded) {
@@ -278,7 +254,7 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
 }
 
 // Set the arbiter.
-cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
+cur_thread->SetAddressArbiter(&thread_tree, addr);
 thread_tree.insert(*cur_thread);
 cur_thread->SetState(ThreadState::Waiting);
 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
@@ -299,7 +275,7 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
 
 // Get the result.
 KSynchronizationObject* dummy{};
-return cur_thread->GetWaitResult(std::addressof(dummy));
+return cur_thread->GetWaitResult(&dummy);
 }
 
 ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
@@ -320,7 +296,7 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
 
 // Read the value from userspace.
 s32 user_value{};
-if (!ReadFromUser(system, std::addressof(user_value), addr)) {
+if (!ReadFromUser(system, &user_value, addr)) {
 slp.CancelSleep();
 return Svc::ResultInvalidCurrentMemory;
 }
@@ -338,7 +314,7 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
 }
 
 // Set the arbiter.
-cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
+cur_thread->SetAddressArbiter(&thread_tree, addr);
 thread_tree.insert(*cur_thread);
 cur_thread->SetState(ThreadState::Waiting);
 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
@@ -359,7 +335,7 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
 
 // Get the result.
 KSynchronizationObject* dummy{};
-return cur_thread->GetWaitResult(std::addressof(dummy));
+return cur_thread->GetWaitResult(&dummy);
 }
 
 } // namespace Kernel
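Two recurring, purely mechanical patterns in the KAddressArbiter hunks above: the R_UNLESS(...) early-return macro is replaced by an explicit if block that logs before returning the same result code, and std::addressof(x) is replaced by &x. For plain kernel types with no overloaded operator& the two address forms give the same pointer; a standalone check, not part of the commit:

#include <cassert>
#include <memory>

int main() {
    int user_value{};
    // Equivalent here because int does not overload operator&.
    assert(std::addressof(user_value) == &user_value);
    return 0;
}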
src/core/hle/kernel/k_scoped_resource_reservation.h (new executable file, 67 lines)
@@ -0,0 +1,67 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/process.h"
+
+namespace Kernel {
+
+class KScopedResourceReservation {
+public:
+explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r,
+s64 v, s64 timeout)
+: resource_limit(std::move(l)), value(v), resource(r) {
+if (resource_limit && value) {
+success = resource_limit->Reserve(resource, value, timeout);
+} else {
+success = true;
+}
+}
+
+explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r,
+s64 v = 1)
+: resource_limit(std::move(l)), value(v), resource(r) {
+if (resource_limit && value) {
+success = resource_limit->Reserve(resource, value);
+} else {
+success = true;
+}
+}
+
+explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v, s64 t)
+: KScopedResourceReservation(p->GetResourceLimit(), r, v, t) {}
+
+explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v = 1)
+: KScopedResourceReservation(p->GetResourceLimit(), r, v) {}
+
+~KScopedResourceReservation() noexcept {
+if (resource_limit && value && success) {
+// resource was not committed, release the reservation.
+resource_limit->Release(resource, value);
+}
+}
+
+/// Commit the resource reservation, destruction of this object does not release the resource
+void Commit() {
+resource_limit = nullptr;
+}
+
+[[nodiscard]] bool Succeeded() const {
+return success;
+}
+
+private:
+std::shared_ptr<KResourceLimit> resource_limit;
+s64 value;
+LimitableResource resource;
+bool success;
+};
+
+} // namespace Kernel
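The class above is the RAII helper the rest of this commit builds on: reserve up front, check Succeeded(), and Commit() only once the object is actually created, so any early return releases the reservation automatically in the destructor. A sketch of that call shape, modeled on the call sites later in the commit and written as if inside namespace Kernel; it is not part of the commit, and CreateSomeKernelObject is a hypothetical name.

ResultCode CreateSomeKernelObject(Process* process) {
    // Reserve one Events slot against the creating process' resource limit.
    KScopedResourceReservation reservation(process, LimitableResource::Events);
    if (!reservation.Succeeded()) {
        return ERR_RESOURCE_LIMIT_EXCEEDED; // nothing was reserved, nothing to roll back
    }

    // ... allocate and register the object; an early return here would still
    // release the reservation in ~KScopedResourceReservation ...

    reservation.Commit(); // success: keep the reserved count
    return RESULT_SUCCESS;
}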
@@ -141,11 +141,17 @@ struct KernelCore::Impl {
 ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Events, 700).IsSuccess());
 ASSERT(system_resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200)
 .IsSuccess());
-ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Sessions, 900).IsSuccess());
+ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Sessions, 933).IsSuccess());
 
-if (!system_resource_limit->Reserve(LimitableResource::PhysicalMemory, 0x60000)) {
+// Derived from recent software updates. The kernel reserves 27MB
+constexpr u64 kernel_size{0x1b00000};
+if (!system_resource_limit->Reserve(LimitableResource::PhysicalMemory, kernel_size)) {
 UNREACHABLE();
 }
+// Reserve secure applet memory, introduced in firmware 5.0.0
+constexpr u64 secure_applet_memory_size{0x400000};
+ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory,
+secure_applet_memory_size));
 }
 
 void InitializePreemption(KernelCore& kernel) {
@@ -302,8 +308,11 @@ struct KernelCore::Impl {
 // Allocate slab heaps
 user_slab_heap_pages = std::make_unique<Memory::SlabHeap<Memory::Page>>();
 
+constexpr u64 user_slab_heap_size{0x1ef000};
+// Reserve slab heaps
+ASSERT(
+system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size));
 // Initialize slab heaps
-constexpr u64 user_slab_heap_size{0x3de000};
 user_slab_heap_pages->Initialize(
 system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
 user_slab_heap_size);
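A standalone sanity check of the constants introduced in the hunks above (not part of the commit): 0x1b00000 bytes is 27 MiB, matching the "27MB" comment, and 0x400000 bytes is a 4 MiB secure-applet carve-out.

static_assert(0x1b00000 == 27ull * 1024 * 1024, "kernel reservation is 27 MiB");
static_assert(0x400000 == 4ull * 1024 * 1024, "secure applet memory is 4 MiB");
static_assert(0x1ef000 % 4096 == 0, "user slab heap size is a whole number of 4 KiB pages");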
@@ -7,7 +7,7 @@
 #include "common/scope_exit.h"
 #include "core/core.h"
 #include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/address_space_info.h"
 #include "core/hle/kernel/memory/memory_block.h"
@@ -409,27 +409,25 @@ ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
 return RESULT_SUCCESS;
 }
 
-auto process{system.Kernel().CurrentProcess()};
 const std::size_t remaining_size{size - mapped_size};
 const std::size_t remaining_pages{remaining_size / PageSize};
 
-if (process->GetResourceLimit() &&
-!process->GetResourceLimit()->Reserve(LimitableResource::PhysicalMemory, remaining_size)) {
+// Reserve the memory from the process resource limit.
+KScopedResourceReservation memory_reservation(
+system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+remaining_size);
+if (!memory_reservation.Succeeded()) {
+LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
 return ERR_RESOURCE_LIMIT_EXCEEDED;
 }
 
 PageLinkedList page_linked_list;
-{
-auto block_guard = detail::ScopeExit([&] {
-system.Kernel().MemoryManager().Free(page_linked_list, remaining_pages, memory_pool);
-process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, remaining_size);
-});
-
-CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages,
-memory_pool));
+CASCADE_CODE(
+system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages, memory_pool));
 
-block_guard.Cancel();
-}
+// We succeeded, so commit the memory reservation.
+memory_reservation.Commit();
 
 MapPhysicalMemory(page_linked_list, addr, end_addr);
 
@@ -781,9 +779,13 @@ ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
 
 const u64 delta{size - previous_heap_size};
 
-auto process{system.Kernel().CurrentProcess()};
-if (process->GetResourceLimit() && delta != 0 &&
-!process->GetResourceLimit()->Reserve(LimitableResource::PhysicalMemory, delta)) {
+// Reserve memory for the heap extension.
+KScopedResourceReservation memory_reservation(
+system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+delta);
+
+if (!memory_reservation.Succeeded()) {
+LOG_ERROR(Kernel, "Could not reserve heap extension of size {:X} bytes", delta);
 return ERR_RESOURCE_LIMIT_EXCEEDED;
 }
 
@@ -800,6 +802,9 @@ ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
 CASCADE_CODE(
 Operate(current_heap_addr, num_pages, page_linked_list, OperationType::MapGroup));
 
+// Succeeded in allocation, commit the resource reservation
+memory_reservation.Commit();
+
 block_manager->Update(current_heap_addr, num_pages, MemoryState::Normal,
 MemoryPermission::ReadAndWrite);
 
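In the MapPhysicalMemory hunk above, the removed block used an explicit ScopeExit guard to free pages and release the resource limit on failure, cancelling the guard on success; the scoped reservation now provides the same rollback through its destructor. A simplified, self-contained stand-in for that guard pattern (not yuzu's detail::ScopeExit and not part of the commit), showing why cancelling or committing on the success path keeps the reservation:

#include <cstdio>
#include <utility>

// Minimal scope guard: runs the callback on destruction unless cancelled.
template <typename F>
class Guard {
public:
    explicit Guard(F f) : func(std::move(f)) {}
    ~Guard() {
        if (active) {
            func();
        }
    }
    void Cancel() {
        active = false;
    }

private:
    F func;
    bool active = true;
};

int main() {
    bool reserved = true;
    {
        // On a failure path the function would return early and the guard
        // would release the reservation.
        Guard guard([&] { reserved = false; });
        // ... allocation succeeds ...
        guard.Cancel(); // plays the role of memory_reservation.Commit() in the new code
    }
    std::printf("still reserved after success: %s\n", reserved ? "yes" : "no"); // prints "yes"
    return 0;
}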
@@ -17,6 +17,7 @@
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/k_resource_limit.h"
 #include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/memory_block_manager.h"
@@ -39,6 +40,7 @@ namespace {
 */
 void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) {
 const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
+ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1));
 auto thread_res = KThread::Create(system, ThreadType::User, "main", entry_point, priority, 0,
 owner_process.GetIdealCoreId(), stack_top, &owner_process);
 
@@ -117,6 +119,9 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
 
 std::shared_ptr<Process> process = std::make_shared<Process>(system);
 process->name = std::move(name);
+
+// TODO: This is inaccurate
+// The process should hold a reference to the kernel-wide resource limit.
 process->resource_limit = std::make_shared<KResourceLimit>(kernel, system);
 process->status = ProcessStatus::Created;
 process->program_id = 0;
@@ -155,6 +160,9 @@ void Process::DecrementThreadCount() {
 }
 
 u64 Process::GetTotalPhysicalMemoryAvailable() const {
+// TODO: This is expected to always return the application memory pool size after accurately
+// reserving kernel resources. The current workaround uses a process-local resource limit of
+// application memory pool size, which is inaccurate.
 const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
 page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
 main_thread_stack_size};
@@ -264,6 +272,17 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
 system_resource_size = metadata.GetSystemResourceSize();
 image_size = code_size;
 
+// Set initial resource limits
+resource_limit->SetLimitValue(
+LimitableResource::PhysicalMemory,
+kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application));
+KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
+code_size + system_resource_size);
+if (!memory_reservation.Succeeded()) {
+LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
+code_size + system_resource_size);
+return ERR_RESOURCE_LIMIT_EXCEEDED;
+}
 // Initialize proces address space
 if (const ResultCode result{
 page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
@@ -305,24 +324,22 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
 UNREACHABLE();
 }
 
-// Set initial resource limits
-resource_limit->SetLimitValue(
-LimitableResource::PhysicalMemory,
-kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application));
 resource_limit->SetLimitValue(LimitableResource::Threads, 608);
 resource_limit->SetLimitValue(LimitableResource::Events, 700);
 resource_limit->SetLimitValue(LimitableResource::TransferMemory, 128);
 resource_limit->SetLimitValue(LimitableResource::Sessions, 894);
-ASSERT(resource_limit->Reserve(LimitableResource::PhysicalMemory, code_size));
 
 // Create TLS region
 tls_region_address = CreateTLSRegion();
+memory_reservation.Commit();
 
 return handle_table.SetSize(capabilities.GetHandleTableSize());
 }
 
 void Process::Run(s32 main_thread_priority, u64 stack_size) {
 AllocateMainThreadStack(stack_size);
+resource_limit->Reserve(LimitableResource::Threads, 1);
+resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
 
 const std::size_t heap_capacity{memory_usage_capacity - main_thread_stack_size - image_size};
 ASSERT(!page_table->SetHeapCapacity(heap_capacity).IsError());
@@ -330,8 +347,6 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
 ChangeStatus(ProcessStatus::Running);
 
 SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top);
-resource_limit->Reserve(LimitableResource::Threads, 1);
-resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
 }
 
 void Process::PrepareForTermination() {
@@ -358,6 +373,11 @@ void Process::PrepareForTermination() {
 FreeTLSRegion(tls_region_address);
 tls_region_address = 0;
 
+if (resource_limit) {
+resource_limit->Release(LimitableResource::PhysicalMemory,
+main_thread_stack_size + image_size);
+}
+
 ChangeStatus(ProcessStatus::Exited);
 }
 
@@ -4,15 +4,23 @@
 
 #include "common/assert.h"
 #include "core/hle/kernel/client_session.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
 #include "core/hle/kernel/server_session.h"
 #include "core/hle/kernel/session.h"
 
 namespace Kernel {
 
 Session::Session(KernelCore& kernel) : KSynchronizationObject{kernel} {}
-Session::~Session() = default;
+Session::~Session() {
+// Release reserved resource when the Session pair was created.
+kernel.GetSystemResourceLimit()->Release(LimitableResource::Sessions, 1);
+}
 
 Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
+// Reserve a new session from the resource limit.
+KScopedResourceReservation session_reservation(kernel.GetSystemResourceLimit(),
+LimitableResource::Sessions);
+ASSERT(session_reservation.Succeeded());
 auto session{std::make_shared<Session>(kernel)};
 auto client_session{Kernel::ClientSession::Create(kernel, session, name + "_Client").Unwrap()};
 auto server_session{Kernel::ServerSession::Create(kernel, session, name + "_Server").Unwrap()};
@@ -21,6 +29,7 @@ Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
 session->client = client_session;
 session->server = server_session;
 
+session_reservation.Commit();
 return std::make_pair(std::move(client_session), std::move(server_session));
 }
 
@@ -4,6 +4,7 @@
 
 #include "common/assert.h"
 #include "core/core.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/shared_memory.h"
@@ -13,7 +14,9 @@ namespace Kernel {
 SharedMemory::SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory)
 : Object{kernel}, device_memory{device_memory} {}
 
-SharedMemory::~SharedMemory() = default;
+SharedMemory::~SharedMemory() {
+kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size);
+}
 
 std::shared_ptr<SharedMemory> SharedMemory::Create(
 KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
@@ -21,6 +24,11 @@ std::shared_ptr<SharedMemory> SharedMemory::Create(
 Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size,
 std::string name) {
 
+const auto resource_limit = kernel.GetSystemResourceLimit();
+KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
+size);
+ASSERT(memory_reservation.Succeeded());
+
 std::shared_ptr<SharedMemory> shared_memory{
 std::make_shared<SharedMemory>(kernel, device_memory)};
 
@@ -32,6 +40,7 @@ std::shared_ptr<SharedMemory> SharedMemory::Create(
 shared_memory->size = size;
 shared_memory->name = name;
 
+memory_reservation.Commit();
 return shared_memory;
 }
 
@@ -31,6 +31,7 @@
 #include "core/hle/kernel/k_readable_event.h"
 #include "core/hle/kernel/k_resource_limit.h"
 #include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread.h"
@@ -138,6 +139,7 @@ ResultCode MapUnmapMemorySanityChecks(const Memory::PageTable& manager, VAddr ds
 enum class ResourceLimitValueType {
 CurrentValue,
 LimitValue,
+PeakValue,
 };
 
 ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_limit,
@@ -160,11 +162,17 @@ ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_
 return ERR_INVALID_HANDLE;
 }
 
-if (value_type == ResourceLimitValueType::CurrentValue) {
+switch (value_type) {
+case ResourceLimitValueType::CurrentValue:
 return MakeResult(resource_limit_object->GetCurrentValue(type));
+case ResourceLimitValueType::LimitValue:
+return MakeResult(resource_limit_object->GetLimitValue(type));
+case ResourceLimitValueType::PeakValue:
+return MakeResult(resource_limit_object->GetPeakValue(type));
+default:
+LOG_ERROR(Kernel_SVC, "Invalid resource value_type: '{}'", value_type);
+return ERR_INVALID_ENUM_VALUE;
 }
 
-return MakeResult(resource_limit_object->GetLimitValue(type));
 }
 } // Anonymous namespace
 
@@ -314,8 +322,6 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
 return ERR_NOT_FOUND;
 }
 
-ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(LimitableResource::Sessions, 1));
-
 auto client_port = it->second;
 
 std::shared_ptr<ClientSession> client_session;
@@ -1452,8 +1458,13 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
 Svc::ResultInvalidPriority);
 R_UNLESS(process.CheckThreadPriority(priority), Svc::ResultInvalidPriority);
 
-ASSERT(process.GetResourceLimit()->Reserve(
-LimitableResource::Threads, 1, system.CoreTiming().GetGlobalTimeNs().count() + 100000000));
+KScopedResourceReservation thread_reservation(
+kernel.CurrentProcess(), LimitableResource::Threads, 1,
+system.CoreTiming().GetGlobalTimeNs().count() + 100000000);
+if (!thread_reservation.Succeeded()) {
+LOG_ERROR(Kernel_SVC, "Could not reserve a new thread");
+return ERR_RESOURCE_LIMIT_EXCEEDED;
+}
 
 std::shared_ptr<KThread> thread;
 {
@@ -1473,6 +1484,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
 // Set the thread name for debugging purposes.
 thread->SetName(
 fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle));
+thread_reservation.Commit();
 
 return RESULT_SUCCESS;
 }
@@ -1787,6 +1799,13 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
 }
 
 auto& kernel = system.Kernel();
+// Reserve a new transfer memory from the process resource limit.
+KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(),
+LimitableResource::TransferMemory);
+if (!trmem_reservation.Succeeded()) {
+LOG_ERROR(Kernel_SVC, "Could not reserve a new transfer memory");
+return ERR_RESOURCE_LIMIT_EXCEEDED;
+}
 auto transfer_mem_handle = TransferMemory::Create(kernel, system.Memory(), addr, size, perms);
 
 if (const auto reserve_result{transfer_mem_handle->Reserve()}; reserve_result.IsError()) {
@@ -1798,6 +1817,7 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
 if (result.Failed()) {
 return result.Code();
 }
+trmem_reservation.Commit();
 
 *handle = *result;
 return RESULT_SUCCESS;
@@ -1879,13 +1899,25 @@ static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle
 static ResultCode SignalEvent(Core::System& system, Handle event_handle) {
 LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
 
+auto& kernel = system.Kernel();
 // Get the current handle table.
-const HandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+const HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable();
 
+// Reserve a new event from the process resource limit.
+KScopedResourceReservation event_reservation(kernel.CurrentProcess(),
+LimitableResource::Events);
+if (!event_reservation.Succeeded()) {
+LOG_ERROR(Kernel, "Could not reserve a new event");
+return ERR_RESOURCE_LIMIT_EXCEEDED;
+}
+
 // Get the writable event.
 auto writable_event = handle_table.Get<KWritableEvent>(event_handle);
 R_UNLESS(writable_event, Svc::ResultInvalidHandle);
 
+// Commit the successfuly reservation.
+event_reservation.Commit();
+
 return writable_event->Signal();
 }
 
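In the CreateThread hunk above, the thread reservation is given a deadline of the current time plus 100000000 nanoseconds, i.e. 100 ms past "now". A standalone check of that arithmetic (not part of the commit):

#include <chrono>

static_assert(std::chrono::nanoseconds{100000000} == std::chrono::milliseconds{100},
              "the CreateThread reservation deadline is 100 ms past the current time");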
@@ -2,6 +2,7 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include "core/hle/kernel/k_resource_limit.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process.h"
@@ -17,6 +18,7 @@ TransferMemory::TransferMemory(KernelCore& kernel, Core::Memory::Memory& memory)
 TransferMemory::~TransferMemory() {
 // Release memory region when transfer memory is destroyed
 Reset();
+owner_process->GetResourceLimit()->Release(LimitableResource::TransferMemory, 1);
 }
 
 std::shared_ptr<TransferMemory> TransferMemory::Create(KernelCore& kernel,
@@ -14,7 +14,7 @@
 namespace Common {
 
 TEST_CASE("RingBuffer: Basic Tests", "[common]") {
-RingBuffer<char, 4, 1> buf;
+RingBuffer<char, 4> buf;
 
 // Pushing values into a ring buffer with space should succeed.
 for (std::size_t i = 0; i < 4; i++) {
@@ -77,7 +77,7 @@ TEST_CASE("RingBuffer: Basic Tests", "[common]") {
 }
 
 TEST_CASE("RingBuffer: Threaded Test", "[common]") {
-RingBuffer<char, 4, 2> buf;
+RingBuffer<char, 8> buf;
 const char seed = 42;
 const std::size_t count = 1000000;
 std::size_t full = 0;
@@ -92,8 +92,8 @@ TEST_CASE("RingBuffer: Threaded Test", "[common]") {
 std::array<char, 2> value = {seed, seed};
 std::size_t i = 0;
 while (i < count) {
-if (const std::size_t c = buf.Push(&value[0], 1); c > 0) {
-REQUIRE(c == 1U);
+if (const std::size_t c = buf.Push(&value[0], 2); c > 0) {
+REQUIRE(c == 2U);
 i++;
 next_value(value);
 } else {
@@ -107,7 +107,7 @@ TEST_CASE("RingBuffer: Threaded Test", "[common]") {
 std::array<char, 2> value = {seed, seed};
 std::size_t i = 0;
 while (i < count) {
-if (const std::vector<char> v = buf.Pop(1); v.size() > 0) {
+if (const std::vector<char> v = buf.Pop(2); v.size() > 0) {
 REQUIRE(v.size() == 2U);
 REQUIRE(v[0] == value[0]);
 REQUIRE(v[1] == value[1]);