early-access version 2790
src/core/CMakeLists.txt

@@ -143,8 +143,6 @@ add_library(core STATIC
    frontend/emu_window.h
    frontend/framebuffer_layout.cpp
    frontend/framebuffer_layout.h
    hardware_interrupt_manager.cpp
    hardware_interrupt_manager.h
    hid/emulated_console.cpp
    hid/emulated_console.h
    hid/emulated_controller.cpp
@@ -528,6 +526,12 @@ add_library(core STATIC
    hle/service/ns/pdm_qry.h
    hle/service/ns/pl_u.cpp
    hle/service/ns/pl_u.h
    hle/service/nvdrv/core/container.cpp
    hle/service/nvdrv/core/container.h
    hle/service/nvdrv/core/nvmap.cpp
    hle/service/nvdrv/core/nvmap.h
    hle/service/nvdrv/core/syncpoint_manager.cpp
    hle/service/nvdrv/core/syncpoint_manager.h
    hle/service/nvdrv/devices/nvdevice.h
    hle/service/nvdrv/devices/nvdisp_disp0.cpp
    hle/service/nvdrv/devices/nvdisp_disp0.h
@@ -556,8 +560,6 @@ add_library(core STATIC
    hle/service/nvdrv/nvdrv_interface.h
    hle/service/nvdrv/nvmemp.cpp
    hle/service/nvdrv/nvmemp.h
    hle/service/nvdrv/syncpoint_manager.cpp
    hle/service/nvdrv/syncpoint_manager.h
    hle/service/nvflinger/binder.h
    hle/service/nvflinger/buffer_item.h
    hle/service/nvflinger/buffer_item_consumer.cpp
src/core/core.cpp

@@ -27,7 +27,6 @@
#include "core/file_sys/savedata_factory.h"
#include "core/file_sys/vfs_concat.h"
#include "core/file_sys/vfs_real.h"
#include "core/hardware_interrupt_manager.h"
#include "core/hid/hid_core.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_process.h"
@@ -50,6 +49,7 @@
#include "core/reporter.h"
#include "core/telemetry_session.h"
#include "core/tools/freezer.h"
#include "video_core/host1x/host1x.h"
#include "video_core/renderer_base.h"
#include "video_core/video_core.h"
@@ -209,6 +209,7 @@ struct System::Impl {

        telemetry_session = std::make_unique<Core::TelemetrySession>();

        host1x_core = std::make_unique<Tegra::Host1x::Host1x>(system);
        gpu_core = VideoCore::CreateGPU(emu_window, system);
        if (!gpu_core) {
            return SystemResultStatus::ErrorVideoCore;
@@ -216,7 +217,6 @@ struct System::Impl {

        service_manager = std::make_shared<Service::SM::ServiceManager>(kernel);
        services = std::make_unique<Service::Services>(service_manager, system);
        interrupt_manager = std::make_unique<Hardware::InterruptManager>(system);

        // Initialize time manager, which must happen after kernel is created
        time_manager.Initialize();
@@ -342,6 +342,7 @@ struct System::Impl {
        core_timing.Shutdown();
        app_loader.reset();
        gpu_core.reset();
        host1x_core.reset();
        perf_stats.reset();
        kernel.Shutdown();
        memory.Reset();
@@ -405,7 +406,7 @@ struct System::Impl {
    /// AppLoader used to load the current executing application
    std::unique_ptr<Loader::AppLoader> app_loader;
    std::unique_ptr<Tegra::GPU> gpu_core;
    std::unique_ptr<Hardware::InterruptManager> interrupt_manager;
    std::unique_ptr<Tegra::Host1x::Host1x> host1x_core;
    std::unique_ptr<Core::DeviceMemory> device_memory;
    Core::Memory::Memory memory;
    Core::HID::HIDCore hid_core;
@@ -608,12 +609,12 @@ const Tegra::GPU& System::GPU() const {
    return *impl->gpu_core;
}

Core::Hardware::InterruptManager& System::InterruptManager() {
    return *impl->interrupt_manager;
Tegra::Host1x::Host1x& System::Host1x() {
    return *impl->host1x_core;
}

const Core::Hardware::InterruptManager& System::InterruptManager() const {
    return *impl->interrupt_manager;
const Tegra::Host1x::Host1x& System::Host1x() const {
    return *impl->host1x_core;
}

VideoCore::RendererBase& System::Renderer() {
src/core/core.h

@@ -75,6 +75,9 @@ class TimeManager;
namespace Tegra {
class DebugContext;
class GPU;
namespace Host1x {
class Host1x;
} // namespace Host1x
} // namespace Tegra

namespace VideoCore {
@@ -85,10 +88,6 @@ namespace Core::Timing {
class CoreTiming;
}

namespace Core::Hardware {
class InterruptManager;
}

namespace Core::HID {
class HIDCore;
}
@@ -244,6 +243,12 @@ public:
    /// Gets an immutable reference to the GPU interface.
    [[nodiscard]] const Tegra::GPU& GPU() const;

    /// Gets a mutable reference to the Host1x interface
    [[nodiscard]] Tegra::Host1x::Host1x& Host1x();

    /// Gets an immutable reference to the Host1x interface.
    [[nodiscard]] const Tegra::Host1x::Host1x& Host1x() const;

    /// Gets a mutable reference to the renderer.
    [[nodiscard]] VideoCore::RendererBase& Renderer();

@@ -274,12 +279,6 @@ public:
    /// Provides a constant reference to the core timing instance.
    [[nodiscard]] const Timing::CoreTiming& CoreTiming() const;

    /// Provides a reference to the interrupt manager instance.
    [[nodiscard]] Core::Hardware::InterruptManager& InterruptManager();

    /// Provides a constant reference to the interrupt manager instance.
    [[nodiscard]] const Core::Hardware::InterruptManager& InterruptManager() const;

    /// Provides a reference to the kernel instance.
    [[nodiscard]] Kernel::KernelCore& Kernel();
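Editor's note: System now exposes Host1x alongside the GPU. A hedged sketch of how service code reaches it through the new accessor; the three member functions used are the ones this commit calls elsewhere (in nvmap.cpp and syncpoint_manager.cpp), while the helper itself is hypothetical:

    // Illustrative only: reaching Host1x state through the new System::Host1x()
    // accessor. The helper name is made up for this example.
    void ExampleHost1xAccess(Core::System& system) {
        Tegra::Host1x::Host1x& host1x = system.Host1x();

        auto& smmu_allocator = host1x.Allocator();            // SMMU VA allocator used when pinning
        auto& smmu_memory_manager = host1x.MemoryManager();   // SMMU mapping used by NvMap
        auto& host_syncpoints = host1x.GetSyncpointManager(); // HW syncpoint values for UpdateMin

        (void)smmu_allocator;
        (void)smmu_memory_manager;
        (void)host_syncpoints;
    }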
src/core/hle/service/nvdrv/core/container.cpp (new executable file, 41 lines)

@@ -0,0 +1,41 @@
// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
#include "video_core/host1x/host1x.h"

namespace Service::Nvidia::NvCore {

struct ContainerImpl {
    ContainerImpl(Tegra::Host1x::Host1x& host1x_) : file{host1x_}, manager{host1x_} {}
    NvMap file;
    SyncpointManager manager;
};

Container::Container(Tegra::Host1x::Host1x& host1x_) {
    impl = std::make_unique<ContainerImpl>(host1x_);
}

Container::~Container() = default;

NvMap& Container::GetNvMapFile() {
    return impl->file;
}

const NvMap& Container::GetNvMapFile() const {
    return impl->file;
}

SyncpointManager& Container::GetSyncpointManager() {
    return impl->manager;
}

const SyncpointManager& Container::GetSyncpointManager() const {
    return impl->manager;
}

} // namespace Service::Nvidia::NvCore
src/core/hle/service/nvdrv/core/container.h (new executable file, 42 lines)

@@ -0,0 +1,42 @@
// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#pragma once

#include <memory>

namespace Tegra {

namespace Host1x {
class Host1x;
} // namespace Host1x

} // namespace Tegra

namespace Service::Nvidia::NvCore {

class NvMap;
class SyncpointManager;

struct ContainerImpl;

class Container {
public:
    Container(Tegra::Host1x::Host1x& host1x);
    ~Container();

    NvMap& GetNvMapFile();

    const NvMap& GetNvMapFile() const;

    SyncpointManager& GetSyncpointManager();

    const SyncpointManager& GetSyncpointManager() const;

private:
    std::unique_ptr<ContainerImpl> impl;
};

} // namespace Service::Nvidia::NvCore
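Editor's note: Container is a thin pimpl wrapper that owns the nvmap and syncpoint state shared by every nvdrv device node. A minimal usage sketch, assuming a Host1x instance is already available (the rewritten device constructors later in this commit follow the same pattern):

    // Sketch of the intended ownership pattern: one Container per nvdrv module,
    // with device nodes borrowing references to its NvMap and SyncpointManager.
    // `host1x` is assumed to come from System::Host1x() as added by this commit.
    void ExampleContainerUse(Tegra::Host1x::Host1x& host1x) {
        Service::Nvidia::NvCore::Container container{host1x};

        // Devices keep references rather than shared_ptrs, mirroring
        // nvdisp_disp0's new constructor: nvmap{core.GetNvMapFile()}.
        Service::Nvidia::NvCore::NvMap& nvmap = container.GetNvMapFile();
        Service::Nvidia::NvCore::SyncpointManager& syncpoints =
            container.GetSyncpointManager();

        (void)nvmap;
        (void)syncpoints;
    }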
src/core/hle/service/nvdrv/core/nvmap.cpp (new executable file, 264 lines)

@@ -0,0 +1,264 @@
// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/memory.h"
#include "video_core/host1x/host1x.h"

using Core::Memory::PAGE_SIZE;

namespace Service::Nvidia::NvCore {
NvMap::Handle::Handle(u64 size_, Id id_)
    : size(size_), aligned_size(size), orig_size(size), id(id_) {
    flags.raw = 0;
}

NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) {
    std::scoped_lock lock(mutex);

    // Handles cannot be allocated twice
    if (allocated) {
        return NvResult::AccessDenied;
    }

    flags = pFlags;
    kind = pKind;
    align = pAlign < PAGE_SIZE ? PAGE_SIZE : pAlign;

    // This flag is only applicable for handles with an address passed
    if (pAddress) {
        flags.keep_uncached_after_free.Assign(0);
    } else {
        LOG_CRITICAL(Service_NVDRV,
                     "Mapping nvmap handles without a CPU side address is unimplemented!");
    }

    size = Common::AlignUp(size, PAGE_SIZE);
    aligned_size = Common::AlignUp(size, align);
    address = pAddress;

    // TODO: pin init

    allocated = true;

    return NvResult::Success;
}

NvResult NvMap::Handle::Duplicate(bool internal_session) {
    // Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS)
    if (!allocated) [[unlikely]] {
        return NvResult::BadValue;
    }

    std::scoped_lock lock(mutex);

    // If we internally use FromId the duplication tracking of handles won't work accurately due to
    // us not implementing per-process handle refs.
    if (internal_session) {
        internal_dupes++;
    } else {
        dupes++;
    }

    return NvResult::Success;
}

NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {}

void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) {
    std::scoped_lock lock(handles_lock);

    handles.emplace(handle_description->id, std::move(handle_description));
}

void NvMap::UnmapHandle(Handle& handle_description) {
    // Remove pending unmap queue entry if needed
    if (handle_description.unmap_queue_entry) {
        unmap_queue.erase(*handle_description.unmap_queue_entry);
        handle_description.unmap_queue_entry.reset();
    }

    // Free and unmap the handle from the SMMU
    host1x.MemoryManager().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
                                 handle_description.aligned_size);
    host1x.Allocator().Free(handle_description.pin_virt_address,
                            static_cast<u32>(handle_description.aligned_size));
    handle_description.pin_virt_address = 0;
}

bool NvMap::TryRemoveHandle(const Handle& handle_description) {
    // No dupes left, we can remove from handle map
    if (handle_description.dupes == 0 && handle_description.internal_dupes == 0) {
        std::scoped_lock lock(handles_lock);

        auto it{handles.find(handle_description.id)};
        if (it != handles.end()) {
            handles.erase(it);
        }

        return true;
    } else {
        return false;
    }
}

NvResult NvMap::CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out) {
    if (!size) [[unlikely]] {
        return NvResult::BadValue;
    }

    u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)};
    auto handle_description{std::make_shared<Handle>(size, id)};
    AddHandle(handle_description);

    result_out = handle_description;
    return NvResult::Success;
}

std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
    std::scoped_lock lock(handles_lock);
    try {
        return handles.at(handle);
    } catch ([[maybe_unused]] std::out_of_range& e) {
        return nullptr;
    }
}

VAddr NvMap::GetHandleAddress(Handle::Id handle) {
    std::scoped_lock lock(handles_lock);
    try {
        return handles.at(handle)->address;
    } catch ([[maybe_unused]] std::out_of_range& e) {
        return 0;
    }
}

u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
    auto handle_description{GetHandle(handle)};
    if (!handle_description) [[unlikely]] {
        return 0;
    }

    std::scoped_lock lock(handle_description->mutex);
    if (!handle_description->pins) {
        // If we're in the unmap queue we can just remove ourselves and return since we're already
        // mapped
        {
            // Lock now to prevent our queue entry from being removed for allocation in-between the
            // following check and erase
            std::scoped_lock queueLock(unmap_queue_lock);
            if (handle_description->unmap_queue_entry) {
                unmap_queue.erase(*handle_description->unmap_queue_entry);
                handle_description->unmap_queue_entry.reset();

                handle_description->pins++;
                return handle_description->pin_virt_address;
            }
        }

        // If not then allocate some space and map it
        u32 address{};
        auto& smmu_allocator = host1x.Allocator();
        auto& smmu_memory_manager = host1x.MemoryManager();
        while (!(address =
                     smmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size)))) {
            // Free handles until the allocation succeeds
            std::scoped_lock queueLock(unmap_queue_lock);
            if (auto freeHandleDesc{unmap_queue.front()}) {
                // Handles in the unmap queue are guaranteed not to be pinned so don't bother
                // checking if they are before unmapping
                std::scoped_lock freeLock(freeHandleDesc->mutex);
                if (handle_description->pin_virt_address)
                    UnmapHandle(*freeHandleDesc);
            } else {
                LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
            }
        }

        smmu_memory_manager.Map(static_cast<GPUVAddr>(address), handle_description->address,
                                handle_description->aligned_size);
        handle_description->pin_virt_address = address;
    }

    handle_description->pins++;
    return handle_description->pin_virt_address;
}

void NvMap::UnpinHandle(Handle::Id handle) {
    auto handle_description{GetHandle(handle)};
    if (!handle_description) {
        return;
    }

    std::scoped_lock lock(handle_description->mutex);
    if (--handle_description->pins < 0) {
        LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!");
    } else if (!handle_description->pins) {
        std::scoped_lock queueLock(unmap_queue_lock);

        // Add to the unmap queue allowing this handle's memory to be freed if needed
        unmap_queue.push_back(handle_description);
        handle_description->unmap_queue_entry = std::prev(unmap_queue.end());
    }
}

std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool internal_session) {
    std::weak_ptr<Handle> hWeak{GetHandle(handle)};
    FreeInfo freeInfo;

    // We use a weak ptr here so we can tell when the handle has been freed and report that back to
    // guest
    if (auto handle_description = hWeak.lock()) {
        std::scoped_lock lock(handle_description->mutex);

        if (internal_session) {
            if (--handle_description->internal_dupes < 0)
                LOG_WARNING(Service_NVDRV, "Internal duplicate count imbalance detected!");
        } else {
            if (--handle_description->dupes < 0) {
                LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!");
            } else if (handle_description->dupes == 0) {
                // Force unmap the handle
                if (handle_description->pin_virt_address) {
                    std::scoped_lock queueLock(unmap_queue_lock);
                    UnmapHandle(*handle_description);
                }

                handle_description->pins = 0;
            }
        }

        // Try to remove the shared ptr to the handle from the map, if nothing else is using the
        // handle then it will now be freed when `handle_description` goes out of scope
        if (TryRemoveHandle(*handle_description)) {
            LOG_DEBUG(Service_NVDRV, "Removed nvmap handle: {}", handle);
        } else {
            LOG_DEBUG(Service_NVDRV,
                      "Tried to free nvmap handle: {} but didn't as it still has duplicates",
                      handle);
        }

        freeInfo = {
            .address = handle_description->address,
            .size = handle_description->size,
            .was_uncached = handle_description->flags.map_uncached.Value() != 0,
        };
    } else {
        return std::nullopt;
    }

    // Handle hasn't been freed from memory, set address to 0 to mark that the handle wasn't freed
    if (!hWeak.expired()) {
        LOG_ERROR(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle);
        freeInfo.address = 0;
    }

    return freeInfo;
}

} // namespace Service::Nvidia::NvCore
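Editor's note: PinHandle/UnpinHandle implement a refcounted residency protocol. The first pin maps the handle into the SMMU (evicting unpinned handles from the unmap queue if the allocator is full); the last unpin parks the handle on the unmap queue instead of unmapping it immediately, so a re-pin is cheap. A minimal sketch of the expected call pattern, with a hypothetical handle id:

    // Sketch, not yuzu code: the pin/unpin protocol as a device node would use
    // it. `nvmap` is an NvCore::NvMap and `id` a hypothetical handle id; every
    // PinHandle must eventually be balanced by an UnpinHandle.
    u32 SubmitWithPinnedBuffer(Service::Nvidia::NvCore::NvMap& nvmap,
                               Service::Nvidia::NvCore::NvMap::Handle::Id id) {
        // First pin maps the handle into the SMMU and returns its IOVA;
        // later pins just bump the refcount and return the same address.
        const u32 smmu_address = nvmap.PinHandle(id);

        // ... hardware work using smmu_address would be queued here ...

        // Last unpin does not unmap: it places the handle on the unmap queue,
        // where it stays mapped until the allocator reclaims it under pressure.
        nvmap.UnpinHandle(id);
        return smmu_address;
    }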
src/core/hle/service/nvdrv/core/nvmap.h (new executable file, 172 lines)

@@ -0,0 +1,172 @@
// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#pragma once

#include <atomic>
#include <list>
#include <memory>
#include <mutex>
#include <optional>
#include <unordered_map>
#include <assert.h>

#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/hle/service/nvdrv/nvdata.h"

namespace Tegra {

namespace Host1x {
class Host1x;
} // namespace Host1x

} // namespace Tegra

namespace Service::Nvidia::NvCore {
/**
 * @brief The nvmap core class holds the global state for nvmap and provides methods to manage
 * handles
 */
class NvMap {
public:
    /**
     * @brief A handle to a contiguous block of memory in an application's address space
     */
    struct Handle {
        std::mutex mutex;

        u64 align{};      //!< The alignment to use when pinning the handle onto the SMMU
        u64 size;         //!< Page-aligned size of the memory the handle refers to
        u64 aligned_size; //!< `align`-aligned size of the memory the handle refers to
        u64 orig_size;    //!< Original unaligned size of the memory this handle refers to

        s32 dupes{1};          //!< How many guest references there are to this handle
        s32 internal_dupes{0}; //!< How many emulator-internal references there are to this handle

        using Id = u32;
        Id id; //!< A globally unique identifier for this handle

        s32 pins{};
        u32 pin_virt_address{};
        std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{};

        union Flags {
            u32 raw;
            BitField<0, 1, u32> map_uncached; //!< If the handle should be mapped as uncached
            BitField<2, 1, u32> keep_uncached_after_free; //!< Only applicable when the handle was
                                                          //!< allocated with a fixed address
            BitField<4, 1, u32> _unk0_; //!< Passed to IOVMM for pins
        } flags{};
        static_assert(sizeof(Flags) == sizeof(u32));

        u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to,
                       //!< this can also be in the nvdrv tmem
        bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
                                     //!< call

        u8 kind{};        //!< Used for memory compression
        bool allocated{}; //!< If the handle has been allocated with `Alloc`

        u64 dma_map_addr{}; //! remove me after implementing pinning.

        Handle(u64 size, Id id);

        /**
         * @brief Sets up the handle with the given memory config, can allocate memory from the
         * tmem if a 0 address is passed
         */
        [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress);

        /**
         * @brief Increases the dupe counter of the handle for the given session
         */
        [[nodiscard]] NvResult Duplicate(bool internal_session);

        /**
         * @brief Obtains a pointer to the handle's memory and marks the handle as having been
         * mapped
         */
        u8* GetPointer() {
            if (!address) {
                return nullptr;
            }

            is_shared_mem_mapped = true;
            return reinterpret_cast<u8*>(address);
        }
    };

private:
    std::list<std::shared_ptr<Handle>> unmap_queue{};
    std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue`

    std::unordered_map<Handle::Id, std::shared_ptr<Handle>>
        handles{};           //!< Main owning map of handles
    std::mutex handles_lock; //!< Protects access to `handles`

    static constexpr u32 HandleIdIncrement{
        4}; //!< Each new handle ID is an increment of 4 from the previous
    std::atomic<u32> next_handle_id{HandleIdIncrement};
    Tegra::Host1x::Host1x& host1x;

    void AddHandle(std::shared_ptr<Handle> handle);

    /**
     * @brief Unmaps and frees the SMMU memory region a handle is mapped to
     * @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this
     */
    void UnmapHandle(Handle& handle_description);

    /**
     * @brief Removes a handle from the map taking its dupes into account
     * @note handle_description.mutex MUST be locked when calling this
     * @return If the handle was removed from the map
     */
    bool TryRemoveHandle(const Handle& handle_description);

public:
    /**
     * @brief Encapsulates the result of a FreeHandle operation
     */
    struct FreeInfo {
        u64 address;       //!< Address the handle referred to before deletion
        u64 size;          //!< Page-aligned handle size
        bool was_uncached; //!< If the handle was allocated as uncached
    };

    NvMap(Tegra::Host1x::Host1x& host1x);

    /**
     * @brief Creates an unallocated handle of the given size
     */
    [[nodiscard]] NvResult CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out);

    std::shared_ptr<Handle> GetHandle(Handle::Id handle);

    VAddr GetHandleAddress(Handle::Id handle);

    /**
     * @brief Maps a handle into the SMMU address space
     * @note This operation is refcounted, the number of calls to this must eventually match the
     * number of calls to `UnpinHandle`
     * @return The SMMU virtual address that the handle has been mapped to
     */
    u32 PinHandle(Handle::Id handle);

    /**
     * @brief When this has been called an equal number of times to `PinHandle` for the supplied
     * handle it will be added to a list of handles to be freed when necessary
     */
    void UnpinHandle(Handle::Id handle);

    /**
     * @brief Tries to free a handle and remove a single dupe
     * @note If a handle has no dupes left and has no other users a FreeInfo struct will be
     * returned describing the prior state of the handle
     */
    std::optional<FreeInfo> FreeHandle(Handle::Id handle, bool internal_session);
};
} // namespace Service::Nvidia::NvCore
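Editor's note: the Flags union mirrors the bit layout of the nvmap ioctl flag word. As a plain-C++ illustration (masks written out by hand instead of yuzu's Common::BitField), the three fields decode like this:

    #include <cstdint>

    // Illustration only: the same bit positions as NvMap::Handle::Flags,
    // decoded with plain masks instead of Common::BitField.
    struct FlagsView {
        std::uint32_t raw;

        bool MapUncached() const { return (raw >> 0) & 1; }           // bit 0
        bool KeepUncachedAfterFree() const { return (raw >> 2) & 1; } // bit 2
        bool Unk0() const { return (raw >> 4) & 1; }                  // bit 4, passed to IOVMM for pins
    };

    static_assert(sizeof(FlagsView) == sizeof(std::uint32_t));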
src/core/hle/service/nvdrv/core/syncpoint_manager.cpp (new executable file, 122 lines)

@@ -0,0 +1,122 @@
// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#include "common/assert.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
#include "video_core/host1x/host1x.h"

namespace Service::Nvidia::NvCore {

SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {
    constexpr u32 VBlank0SyncpointId{26};
    constexpr u32 VBlank1SyncpointId{27};

    // Reserve both vblank syncpoints as client managed as they use Continuous Mode
    // Refer to section 14.3.5.3 of the TRM for more information on Continuous Mode
    // https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/drm/dc.c#L660
    ReserveSyncpoint(VBlank0SyncpointId, true);
    ReserveSyncpoint(VBlank1SyncpointId, true);

    for (u32 syncpointId : channel_syncpoints) {
        if (syncpointId) {
            ReserveSyncpoint(syncpointId, false);
        }
    }
}

SyncpointManager::~SyncpointManager() = default;

u32 SyncpointManager::ReserveSyncpoint(u32 id, bool clientManaged) {
    if (syncpoints.at(id).reserved) {
        UNREACHABLE_MSG("Requested syncpoint is in use");
        return 0;
    }

    syncpoints.at(id).reserved = true;
    syncpoints.at(id).interfaceManaged = clientManaged;

    return id;
}

u32 SyncpointManager::FindFreeSyncpoint() {
    for (u32 i{1}; i < syncpoints.size(); i++) {
        if (!syncpoints[i].reserved) {
            return i;
        }
    }
    UNREACHABLE_MSG("Failed to find a free syncpoint!");
    return 0;
}

u32 SyncpointManager::AllocateSyncpoint(bool clientManaged) {
    std::lock_guard lock(reservation_lock);
    return ReserveSyncpoint(FindFreeSyncpoint(), clientManaged);
}

void SyncpointManager::FreeSyncpoint(u32 id) {
    std::lock_guard lock(reservation_lock);
    ASSERT(syncpoints.at(id).reserved);
    syncpoints.at(id).reserved = false;
}

bool SyncpointManager::IsSyncpointAllocated(u32 id) {
    return (id <= SyncpointCount) && syncpoints[id].reserved;
}

bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) {
    const SyncpointInfo& syncpoint{syncpoints.at(id)};

    if (!syncpoint.reserved) {
        UNREACHABLE();
        return 0;
    }

    // If the interface manages counters then we don't keep track of the maximum value as it
    // handles sanity checking the values itself
    if (syncpoint.interfaceManaged) {
        return static_cast<s32>(syncpoint.counterMin - threshold) >= 0;
    } else {
        return (syncpoint.counterMax - threshold) >= (syncpoint.counterMin - threshold);
    }
}

u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) {
    if (!syncpoints.at(id).reserved) {
        UNREACHABLE();
        return 0;
    }

    return syncpoints.at(id).counterMax += amount;
}

u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {
    if (!syncpoints.at(id).reserved) {
        UNREACHABLE();
        return 0;
    }

    return syncpoints.at(id).counterMin;
}

u32 SyncpointManager::UpdateMin(u32 id) {
    if (!syncpoints.at(id).reserved) {
        UNREACHABLE();
        return 0;
    }

    syncpoints.at(id).counterMin = host1x.GetSyncpointManager().GetHostSyncpointValue(id);
    return syncpoints.at(id).counterMin;
}

NvFence SyncpointManager::GetSyncpointFence(u32 id) {
    if (!syncpoints.at(id).reserved) {
        UNREACHABLE();
        return NvFence{};
    }

    return {.id = static_cast<s32>(id), .value = syncpoints.at(id).counterMax};
}

} // namespace Service::Nvidia::NvCore
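Editor's note: HasSyncpointExpired relies on wrapping u32 arithmetic. For client-managed syncpoints, `static_cast<s32>(counterMin - threshold) >= 0` treats the distance between counter and threshold as a signed value, which stays correct across u32 wraparound as long as the two values are within 2^31 of each other. A self-contained illustration with example values:

    #include <cstdint>

    // Illustration of the wrapping comparison used by HasSyncpointExpired for
    // client-managed syncpoints. The values below are examples, not emulator state.
    constexpr bool Expired(std::uint32_t counter_min, std::uint32_t threshold) {
        return static_cast<std::int32_t>(counter_min - threshold) >= 0;
    }

    // Plain case: the counter has reached the threshold.
    static_assert(Expired(100, 90));
    static_assert(!Expired(90, 100));

    // Wraparound case: the counter wrapped past 0 but is still "ahead" of the
    // threshold, which a naive `counter_min >= threshold` would get wrong.
    static_assert(Expired(5, 0xFFFFFFF0u)); // 5 - 0xFFFFFFF0 == 21 (mod 2^32)
    static_assert(!(5u >= 0xFFFFFFF0u));    // the naive comparison fails here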
src/core/hle/service/nvdrv/core/syncpoint_manager.h (new executable file, 139 lines)

@@ -0,0 +1,139 @@
// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#pragma once

#include <array>
#include <atomic>
#include <mutex>

#include "common/common_types.h"
#include "core/hle/service/nvdrv/nvdata.h"

namespace Tegra {

namespace Host1x {
class Host1x;
} // namespace Host1x

} // namespace Tegra

namespace Service::Nvidia::NvCore {

enum class ChannelType : u32 {
    MsEnc = 0,
    VIC = 1,
    GPU = 2,
    NvDec = 3,
    Display = 4,
    NvJpg = 5,
    TSec = 6,
    Max = 7
};

/**
 * @brief SyncpointManager handles allocating and accessing host1x syncpoints, these are cached
 * versions of the HW syncpoints which are intermittently synced
 * @note Refer to Chapter 14 of the Tegra X1 TRM for an exhaustive overview of them
 * @url https://http.download.nvidia.com/tegra-public-appnotes/host1x.html
 * @url
 * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/jetson-tx1/drivers/video/tegra/host/nvhost_syncpt.c
 */
class SyncpointManager final {
public:
    explicit SyncpointManager(Tegra::Host1x::Host1x& host1x);
    ~SyncpointManager();

    /**
     * @brief Checks if the given syncpoint is both allocated and below the number of HW syncpoints
     */
    bool IsSyncpointAllocated(u32 id);

    /**
     * @brief Finds a free syncpoint and reserves it
     * @return The ID of the reserved syncpoint
     */
    u32 AllocateSyncpoint(bool clientManaged);

    /**
     * @url
     * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/syncpt.c#L259
     */
    bool HasSyncpointExpired(u32 id, u32 threshold);

    bool IsFenceSignalled(NvFence fence) {
        return HasSyncpointExpired(fence.id, fence.value);
    }

    /**
     * @brief Atomically increments the maximum value of a syncpoint by the given amount
     * @return The new max value of the syncpoint
     */
    u32 IncrementSyncpointMaxExt(u32 id, u32 amount);

    /**
     * @return The minimum value of the syncpoint
     */
    u32 ReadSyncpointMinValue(u32 id);

    /**
     * @brief Synchronises the minimum value of the syncpoint with the GPU
     * @return The new minimum value of the syncpoint
     */
    u32 UpdateMin(u32 id);

    /**
     * @brief Frees the usage of a syncpoint.
     */
    void FreeSyncpoint(u32 id);

    /**
     * @return A fence that will be signalled once this syncpoint hits its maximum value
     */
    NvFence GetSyncpointFence(u32 id);

    static constexpr std::array<u32, static_cast<u32>(ChannelType::Max)> channel_syncpoints{
        0x0,  // `MsEnc` is unimplemented
        0xC,  // `VIC`
        0x0,  // `GPU` syncpoints are allocated per-channel instead
        0x36, // `NvDec`
        0x0,  // `Display` is unimplemented
        0x37, // `NvJpg`
        0x0,  // `TSec` is unimplemented
    }; //!< Maps each channel ID to a constant syncpoint

private:
    /**
     * @note reservation_lock should be locked when calling this
     */
    u32 ReserveSyncpoint(u32 id, bool clientManaged);

    /**
     * @return The ID of the first free syncpoint
     */
    u32 FindFreeSyncpoint();

    struct SyncpointInfo {
        std::atomic<u32> counterMin; //!< The least value the syncpoint can be (The value it was
                                     //!< when it was last synchronized with host1x)
        std::atomic<u32> counterMax; //!< The maximum value the syncpoint can reach according to
                                     //!< the current usage
        bool interfaceManaged; //!< If the syncpoint is managed by a host1x client interface, a
                               //!< client interface is a HW block that can handle host1x
                               //!< transactions on behalf of a host1x client (Which would otherwise
                               //!< need to be manually synced using PIO which is synchronous and
                               //!< requires direct cooperation of the CPU)
        bool reserved; //!< If the syncpoint is reserved or not, not to be confused with a reserved
                       //!< value
    };

    constexpr static std::size_t SyncpointCount{192};
    std::array<SyncpointInfo, SyncpointCount> syncpoints{};
    std::mutex reservation_lock;

    Tegra::Host1x::Host1x& host1x;
};

} // namespace Service::Nvidia::NvCore
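Editor's note: taken together, the syncpoint API follows a reserve, increment, wait pattern. A hedged sketch of one fence round-trip through NvCore::SyncpointManager; the member functions are those declared above, while the flow itself is a simplified illustration:

    // Sketch: one fence round-trip. All member functions exist in the header
    // above; the surrounding driver logic is simplified for illustration.
    void ExampleFenceRoundTrip(Service::Nvidia::NvCore::SyncpointManager& manager) {
        // Reserve a syncpoint the emulator (not a client interface) manages.
        const u32 id = manager.AllocateSyncpoint(false);

        // Promise one future increment; the fence targets the new max value.
        manager.IncrementSyncpointMaxExt(id, 1);
        const NvFence fence = manager.GetSyncpointFence(id);

        // Later, after syncing counterMin with host1x, the fence can be polled.
        manager.UpdateMin(id);
        if (manager.IsFenceSignalled(fence)) {
            manager.FreeSyncpoint(id);
        }
    }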
src/core/hle/service/nvdrv/devices/nvdevice.h

@@ -11,6 +11,10 @@ namespace Core {
class System;
}

namespace Kernel {
class KEvent;
}

namespace Service::Nvidia::Devices {

/// Represents an abstract nvidia device node. It is to be subclassed by concrete device nodes to
@@ -64,6 +68,10 @@ public:
     */
    virtual void OnClose(DeviceFD fd) = 0;

    virtual Kernel::KEvent* QueryEvent(u32 event_id) {
        return nullptr;
    }

protected:
    Core::System& system;
};
src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp

@@ -5,15 +5,16 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/perf_stats.h"
#include "video_core/gpu.h"

namespace Service::Nvidia::Devices {

nvdisp_disp0::nvdisp_disp0(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_)
    : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {}
nvdisp_disp0::nvdisp_disp0(Core::System& system_, NvCore::Container& core)
    : nvdevice{system_}, container{core}, nvmap{core.GetNvMapFile()} {}
nvdisp_disp0::~nvdisp_disp0() = default;

NvResult nvdisp_disp0::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -39,8 +40,9 @@ void nvdisp_disp0::OnClose(DeviceFD fd) {}

void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width,
                        u32 height, u32 stride, android::BufferTransformFlags transform,
                        const Common::Rectangle<int>& crop_rect) {
    const VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle);
                        const Common::Rectangle<int>& crop_rect,
                        std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences) {
    const VAddr addr = nvmap.GetHandleAddress(buffer_handle);
    LOG_TRACE(Service,
              "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}",
              addr, offset, width, height, stride, format);
@@ -49,9 +51,14 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat form
                                       stride, format, transform, crop_rect};

    system.GetPerfStats().EndSystemFrame();
    system.GPU().SwapBuffers(&framebuffer);
    system.GPU().RequestSwapBuffers(&framebuffer, fences, num_fences);
    system.SpeedLimiter().DoSpeedLimiting(system.CoreTiming().GetGlobalTimeUs());
    system.GetPerfStats().BeginSystemFrame();
}

Kernel::KEvent* nvdisp_disp0::QueryEvent(u32 event_id) {
    LOG_CRITICAL(Service_NVDRV, "Unknown DISP Event {}", event_id);
    return nullptr;
}

} // namespace Service::Nvidia::Devices
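Editor's note: flip() now forwards the present through RequestSwapBuffers together with up to four nvdrv fences, letting the GPU defer the swap until those syncpoints signal instead of swapping immediately. A hypothetical caller-side sketch; only the flip() signature comes from this commit, and the fence values, dimensions, and enum names (taken from yuzu's nvflinger headers) are illustrative assumptions:

    // Sketch with assumed values: handing flip() the fences guarding a buffer.
    void ExamplePresent(Service::Nvidia::Devices::nvdisp_disp0& disp, u32 buffer_handle) {
        std::array<Service::Nvidia::NvFence, 4> fences{};
        fences[0] = {.id = 12, .value = 100}; // hypothetical syncpoint fence
        const u32 num_fences = 1;

        disp.flip(buffer_handle, 0, Service::android::PixelFormat::Rgba8888,
                  1280, 720, 1280 * 4, Service::android::BufferTransformFlags::Unset,
                  Common::Rectangle<int>{0, 0, 1280, 720}, fences, num_fences);
    }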
src/core/hle/service/nvdrv/devices/nvdisp_disp0.h

@@ -11,13 +11,18 @@
#include "core/hle/service/nvflinger/buffer_transform_flags.h"
#include "core/hle/service/nvflinger/pixel_format.h"

namespace Service::Nvidia::NvCore {
class Container;
class NvMap;
} // namespace Service::Nvidia::NvCore

namespace Service::Nvidia::Devices {

class nvmap;

class nvdisp_disp0 final : public nvdevice {
public:
    explicit nvdisp_disp0(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_);
    explicit nvdisp_disp0(Core::System& system_, NvCore::Container& core);
    ~nvdisp_disp0() override;

    NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -33,10 +38,14 @@ public:
    /// Performs a screen flip, drawing the buffer pointed to by the handle.
    void flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, u32 height,
              u32 stride, android::BufferTransformFlags transform,
              const Common::Rectangle<int>& crop_rect);
              const Common::Rectangle<int>& crop_rect,
              std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences);

    Kernel::KEvent* QueryEvent(u32 event_id) override;

private:
    std::shared_ptr<nvmap> nvmap_dev;
    NvCore::Container& container;
    NvCore::NvMap& nvmap;
};

} // namespace Service::Nvidia::Devices
src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp

@@ -1,21 +1,31 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#include <cstring>
#include <utility>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
#include "core/hle/service/nvdrv/nvdrv.h"
#include "video_core/control/channel_state.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"

namespace Service::Nvidia::Devices {

nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_)
    : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {}
nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, Module& module_, NvCore::Container& core)
    : nvdevice{system_}, module{module_}, container{core}, nvmap{core.GetNvMapFile()}, vm{},
      gmmu{} {}

nvhost_as_gpu::~nvhost_as_gpu() = default;

NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -82,12 +92,51 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>&
    IoctlAllocAsEx params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_WARNING(Service_NVDRV, "(STUBBED) called, big_page_size=0x{:X}", params.big_page_size);
    if (params.big_page_size == 0) {
        params.big_page_size = DEFAULT_BIG_PAGE_SIZE;
    LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size);

    std::scoped_lock lock(mutex);

    if (vm.initialised) {
        UNREACHABLE_MSG("Cannot initialise an address space twice!");
        return NvResult::InvalidState;
    }

    big_page_size = params.big_page_size;
    if (params.big_page_size) {
        if (!std::has_single_bit(params.big_page_size)) {
            LOG_ERROR(Service_NVDRV, "Non power-of-2 big page size: 0x{:X}!", params.big_page_size);
            return NvResult::BadValue;
        }

        if (!(params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES)) {
            LOG_ERROR(Service_NVDRV, "Unsupported big page size: 0x{:X}!", params.big_page_size);
            return NvResult::BadValue;
        }

        vm.big_page_size = params.big_page_size;
        vm.big_page_size_bits = static_cast<u32>(std::countr_zero(params.big_page_size));

        vm.va_range_start = params.big_page_size << VM::VA_START_SHIFT;
    }

    // If this is unspecified then default values should be used
    if (params.va_range_start) {
        vm.va_range_start = params.va_range_start;
        vm.va_range_split = params.va_range_split;
        vm.va_range_end = params.va_range_end;
    }

    const u64 start_pages{vm.va_range_start >> VM::PAGE_SIZE_BITS};
    const u64 end_pages{vm.va_range_split >> VM::PAGE_SIZE_BITS};
    vm.small_page_allocator = std::make_shared<VM::Allocator>(start_pages, end_pages);

    const u64 start_big_pages{vm.va_range_split >> vm.big_page_size_bits};
    const u64 end_big_pages{(vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits};
    vm.big_page_allocator = std::make_unique<VM::Allocator>(start_big_pages, end_big_pages);

    gmmu = std::make_shared<Tegra::MemoryManager>(system, 40, vm.big_page_size_bits,
                                                  VM::PAGE_SIZE_BITS);
    system.GPU().InitAddressSpace(*gmmu);
    vm.initialised = true;

    return NvResult::Success;
}
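Editor's note: AllocAsEx splits the GPU virtual address range at va_range_split, with small pages allocated below the split and big pages above it, and allocator indices expressed in pages rather than bytes. A worked sketch of that arithmetic under assumed values (128 KiB big pages, 4 KiB small pages, illustrative range bounds, none of them yuzu constants):

    #include <cstdint>

    int main() {
        constexpr std::uint64_t page_size_bits = 12;     // 4 KiB small pages (assumed)
        constexpr std::uint64_t big_page_size_bits = 17; // 128 KiB big pages (assumed)

        constexpr std::uint64_t va_range_start = 0x0400000;  // example only
        constexpr std::uint64_t va_range_split = 0x04000000; // example only
        constexpr std::uint64_t va_range_end = 0x100000000;  // example only

        // Small-page allocator works in 4 KiB page indices below the split:
        constexpr std::uint64_t start_pages = va_range_start >> page_size_bits;
        constexpr std::uint64_t end_pages = va_range_split >> page_size_bits;

        // Big-page allocator works in 128 KiB page indices above the split;
        // note its limit is the *count* of big pages past the split:
        constexpr std::uint64_t start_big_pages = va_range_split >> big_page_size_bits;
        constexpr std::uint64_t end_big_pages =
            (va_range_end - va_range_split) >> big_page_size_bits;

        static_assert(start_pages == 0x400);
        static_assert(end_pages == 0x4000);
        static_assert(start_big_pages == 0x200);
        static_assert(end_big_pages == 0x7e00);
        return 0;
    }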
@@ -99,21 +148,75 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<
    LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages,
              params.page_size, params.flags);

    const auto size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
    if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) {
        params.offset = *system.GPU().MemoryManager().AllocateFixed(params.offset, size);
    } else {
        params.offset = system.GPU().MemoryManager().Allocate(size, params.align);
    std::scoped_lock lock(mutex);

    if (!vm.initialised) {
        return NvResult::BadValue;
    }

    auto result = NvResult::Success;
    if (!params.offset) {
        LOG_CRITICAL(Service_NVDRV, "allocation failed for size {}", size);
        result = NvResult::InsufficientMemory;
    if (params.page_size != VM::PAGE_SIZE && params.page_size != vm.big_page_size) {
        return NvResult::BadValue;
    }

    if (params.page_size != vm.big_page_size &&
        ((params.flags & MappingFlags::Sparse) != MappingFlags::None)) {
        UNIMPLEMENTED_MSG("Sparse small pages are not implemented!");
        return NvResult::NotImplemented;
    }

    const u32 page_size_bits{params.page_size == VM::PAGE_SIZE ? VM::PAGE_SIZE_BITS
                                                               : vm.big_page_size_bits};

    auto& allocator{params.page_size == VM::PAGE_SIZE ? *vm.small_page_allocator
                                                      : *vm.big_page_allocator};

    if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) {
        allocator.AllocateFixed(static_cast<u32>(params.offset >> page_size_bits), params.pages);
    } else {
        params.offset = static_cast<u64>(allocator.Allocate(params.pages)) << page_size_bits;
        if (!params.offset) {
            UNREACHABLE_MSG("Failed to allocate free space in the GPU AS!");
            return NvResult::InsufficientMemory;
        }
    }

    u64 size{static_cast<u64>(params.pages) * params.page_size};

    if ((params.flags & MappingFlags::Sparse) != MappingFlags::None) {
        gmmu->MapSparse(params.offset, size);
    }

    allocation_map[params.offset] = {
        .size = size,
        .mappings{},
        .page_size = params.page_size,
        .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None,
        .big_pages = params.page_size != VM::PAGE_SIZE,
    };

    std::memcpy(output.data(), &params, output.size());
    return result;
    return NvResult::Success;
}

void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
    auto mapping{mapping_map.at(offset)};

    if (!mapping->fixed) {
        auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
        u32 page_size_bits{mapping->big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};

        allocator.Free(static_cast<u32>(mapping->offset >> page_size_bits),
                       static_cast<u32>(mapping->size >> page_size_bits));
    }

    // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
    // Only FreeSpace can unmap them fully
    if (mapping->sparse_alloc)
        gmmu->MapSparse(offset, mapping->size, mapping->big_page);
    else
        gmmu->Unmap(offset, mapping->size);

    mapping_map.erase(offset);
}

NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -123,8 +226,40 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>&
    LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset,
              params.pages, params.page_size);

    system.GPU().MemoryManager().Unmap(params.offset,
                                       static_cast<std::size_t>(params.pages) * params.page_size);
    std::scoped_lock lock(mutex);

    if (!vm.initialised) {
        return NvResult::BadValue;
    }

    try {
        auto allocation{allocation_map[params.offset]};

        if (allocation.page_size != params.page_size ||
            allocation.size != (static_cast<u64>(params.pages) * params.page_size)) {
            return NvResult::BadValue;
        }

        for (const auto& mapping : allocation.mappings) {
            FreeMappingLocked(mapping->offset);
        }

        // Unset sparse flag if required
        if (allocation.sparse) {
            gmmu->Unmap(params.offset, allocation.size);
        }

        auto& allocator{params.page_size == VM::PAGE_SIZE ? *vm.small_page_allocator
                                                          : *vm.big_page_allocator};
        u32 page_size_bits{params.page_size == VM::PAGE_SIZE ? VM::PAGE_SIZE_BITS
                                                             : vm.big_page_size_bits};

        allocator.Free(static_cast<u32>(params.offset >> page_size_bits),
                       static_cast<u32>(allocation.size >> page_size_bits));
        allocation_map.erase(params.offset);
    } catch ([[maybe_unused]] const std::out_of_range& e) {
        return NvResult::BadValue;
    }

    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
@@ -135,35 +270,52 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out

    LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries);

    auto result = NvResult::Success;
    std::vector<IoctlRemapEntry> entries(num_entries);
    std::memcpy(entries.data(), input.data(), input.size());

    for (const auto& entry : entries) {
        LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}",
                  entry.offset, entry.nvmap_handle, entry.pages);
    std::scoped_lock lock(mutex);

        const auto object{nvmap_dev->GetObject(entry.nvmap_handle)};
        if (!object) {
            LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle);
            result = NvResult::InvalidState;
            break;
    if (!vm.initialised) {
        return NvResult::BadValue;
    }

    for (const auto& entry : entries) {
        GPUVAddr virtual_address{static_cast<u64>(entry.as_offset_big_pages)
                                 << vm.big_page_size_bits};
        u64 size{static_cast<u64>(entry.big_pages) << vm.big_page_size_bits};

        auto alloc{allocation_map.upper_bound(virtual_address)};

        if (alloc-- == allocation_map.begin() ||
            (virtual_address - alloc->first) + size > alloc->second.size) {
            LOG_WARNING(Service_NVDRV, "Cannot remap into an unallocated region!");
            return NvResult::BadValue;
        }

        const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10};
        const auto size{static_cast<u64>(entry.pages) << 0x10};
        const auto map_offset{static_cast<u64>(entry.map_offset) << 0x10};
        const auto addr{system.GPU().MemoryManager().Map(object->addr + map_offset, offset, size)};
        if (!alloc->second.sparse) {
            LOG_WARNING(Service_NVDRV, "Cannot remap a non-sparse mapping!");
            return NvResult::BadValue;
        }

        if (!addr) {
            LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!");
            result = NvResult::InvalidState;
            break;
        const bool use_big_pages = alloc->second.big_pages;
        if (!entry.handle) {
            gmmu->MapSparse(virtual_address, size, use_big_pages);
        } else {
            auto handle{nvmap.GetHandle(entry.handle)};
            if (!handle) {
                return NvResult::BadValue;
            }

            VAddr cpu_address{static_cast<VAddr>(
                handle->address +
                (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};

            gmmu->Map(virtual_address, cpu_address, size, use_big_pages);
        }
    }

    std::memcpy(output.data(), entries.data(), output.size());
    return result;
    return NvResult::Success;
}

NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -173,79 +325,98 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
    LOG_DEBUG(Service_NVDRV,
              "called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}"
              ", offset={}",
              params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size,
              params.flags, params.handle, params.buffer_offset, params.mapping_size,
              params.offset);

    const auto object{nvmap_dev->GetObject(params.nvmap_handle)};
    if (!object) {
        LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle);
        std::memcpy(output.data(), &params, output.size());
        return NvResult::InvalidState;
    std::scoped_lock lock(mutex);

    if (!vm.initialised) {
        return NvResult::BadValue;
    }

    // The real nvservices doesn't make a distinction between handles and ids, and
    // object can only have one handle and it will be the same as its id. Assert that this is the
    // case to prevent unexpected behavior.
    ASSERT(object->id == params.nvmap_handle);
    auto& gpu = system.GPU();
    // Remaps a subregion of an existing mapping to a different PA
    if ((params.flags & MappingFlags::Remap) != MappingFlags::None) {
        try {
            auto mapping{mapping_map.at(params.offset)};

    u64 page_size{params.page_size};
    if (!page_size) {
        page_size = object->align;
    }

    if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) {
        if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) {
            const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)};
            const auto gpu_addr{static_cast<GPUVAddr>(params.offset + params.buffer_offset)};

            if (!gpu.MemoryManager().Map(cpu_addr, gpu_addr, params.mapping_size)) {
                LOG_CRITICAL(Service_NVDRV,
                             "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, "
                             "mapping_size = {}, offset={}",
                             params.flags, params.nvmap_handle, params.buffer_offset,
                             params.mapping_size, params.offset);

                std::memcpy(output.data(), &params, output.size());
                return NvResult::InvalidState;
            if (mapping->size < params.mapping_size) {
                LOG_WARNING(Service_NVDRV,
                            "Cannot remap a partially mapped GPU address space region: 0x{:X}",
                            params.offset);
                return NvResult::BadValue;
            }

            std::memcpy(output.data(), &params, output.size());
            return NvResult::Success;
        } else {
            LOG_CRITICAL(Service_NVDRV, "address not mapped offset={}", params.offset);
            u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
            VAddr cpu_address{mapping->ptr + params.buffer_offset};

            std::memcpy(output.data(), &params, output.size());
            return NvResult::InvalidState;
            gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page);

            return NvResult::Success;
        } catch ([[maybe_unused]] const std::out_of_range& e) {
            LOG_WARNING(Service_NVDRV, "Cannot remap an unmapped GPU address space region: 0x{:X}",
                        params.offset);
            return NvResult::BadValue;
        }
    }

    // We can only map objects that have already been assigned a CPU address.
    ASSERT(object->status == nvmap::Object::Status::Allocated);

    const auto physical_address{object->addr + params.buffer_offset};
    u64 size{params.mapping_size};
    if (!size) {
        size = object->size;
    auto handle{nvmap.GetHandle(params.handle)};
    if (!handle) {
        return NvResult::BadValue;
    }

    const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None};
    if (is_alloc) {
        params.offset = gpu.MemoryManager().MapAllocate(physical_address, size, page_size);
    } else {
        params.offset = gpu.MemoryManager().Map(physical_address, params.offset, size);
    }
    VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)};
    u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};

    auto result = NvResult::Success;
    if (!params.offset) {
        LOG_CRITICAL(Service_NVDRV, "failed to map size={}", size);
        result = NvResult::InvalidState;
    bool big_page{[&]() {
        if (Common::IsAligned(handle->align, vm.big_page_size))
            return true;
        else if (Common::IsAligned(handle->align, VM::PAGE_SIZE))
            return false;
        else {
            UNREACHABLE();
            return false;
        }
    }()};

    if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) {
        auto alloc{allocation_map.upper_bound(params.offset)};

        if (alloc-- == allocation_map.begin() ||
            (params.offset - alloc->first) + size > alloc->second.size) {
            UNREACHABLE_MSG("Cannot perform a fixed mapping into an unallocated region!");
            return NvResult::BadValue;
        }

        const bool use_big_pages = alloc->second.big_pages && big_page;
        gmmu->Map(params.offset, cpu_address, size, use_big_pages);

        auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
                                               use_big_pages, alloc->second.sparse)};
        alloc->second.mappings.push_back(mapping);
        mapping_map[params.offset] = mapping;
    } else {
        AddBufferMap(params.offset, size, physical_address, is_alloc);

        auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
        u32 page_size{big_page ? vm.big_page_size : VM::PAGE_SIZE};
        u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};

        params.offset = static_cast<u64>(allocator.Allocate(
                            static_cast<u32>(Common::AlignUp(size, page_size) >> page_size_bits)))
                        << page_size_bits;
        if (!params.offset) {
            UNREACHABLE_MSG("Failed to allocate free space in the GPU AS!");
            return NvResult::InsufficientMemory;
        }

        gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), big_page);

        auto mapping{
            std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
        mapping_map[params.offset] = mapping;
    }

    std::memcpy(output.data(), &params, output.size());
    return result;
    return NvResult::Success;
}

NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -254,47 +425,82 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8
|
||||
|
||||
LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);
|
||||
|
||||
if (const auto size{RemoveBufferMap(params.offset)}; size) {
|
||||
system.GPU().MemoryManager().Unmap(params.offset, *size);
|
||||
} else {
|
||||
LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset);
|
||||
std::scoped_lock lock(mutex);
|
||||
|
||||
if (!vm.initialised) {
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
try {
|
||||
auto mapping{mapping_map.at(params.offset)};
|
||||
|
||||
if (!mapping->fixed) {
|
||||
auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
|
||||
u32 page_size_bits{mapping->big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
|
||||
|
||||
allocator.Free(static_cast<u32>(mapping->offset >> page_size_bits),
|
||||
static_cast<u32>(mapping->size >> page_size_bits));
|
||||
}
|
||||
|
||||
// Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
|
||||
// Only FreeSpace can unmap them fully
|
||||
if (mapping->sparse_alloc) {
|
||||
gmmu->MapSparse(params.offset, mapping->size, mapping->big_page);
|
||||
} else {
|
||||
gmmu->Unmap(params.offset, mapping->size);
|
||||
}
|
||||
|
||||
mapping_map.erase(params.offset);
|
||||
} catch ([[maybe_unused]] const std::out_of_range& e) {
|
||||
LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset);
|
||||
}
|
||||
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlBindChannel params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}", params.fd);
|
||||
LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd);
|
||||
|
||||
channel = params.fd;
|
||||
auto gpu_channel_device = module.GetDevice<nvhost_gpu>(params.fd);
|
||||
gpu_channel_device->channel_state->memory_manager = gmmu;
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
|
||||
params.buf_size = 2 * sizeof(VaRegion);
|
||||
|
||||
params.regions = std::array<VaRegion, 2>{
|
||||
VaRegion{
|
||||
.offset = vm.small_page_allocator->vaStart << VM::PAGE_SIZE_BITS,
|
||||
.page_size = VM::PAGE_SIZE,
|
||||
._pad0_{},
|
||||
.pages = vm.small_page_allocator->vaLimit - vm.small_page_allocator->vaStart,
|
||||
},
|
||||
VaRegion{
|
||||
.offset = vm.big_page_allocator->vaStart << vm.big_page_size_bits,
|
||||
.page_size = vm.big_page_size,
|
||||
._pad0_{},
|
||||
.pages = vm.big_page_allocator->vaLimit - vm.big_page_allocator->vaStart,
|
||||
},
|
||||
};
|
||||
}
|
||||
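The arithmetic above replaces the old hard-coded stub values with figures derived from the allocators. A minimal sketch with illustrative numbers only (the vaStart/vaLimit values here are hypothetical, not taken from a real allocator instance):

// Illustrative only: a small-page allocator whose vaStart is page 0x4000
// with 4 KiB pages reproduces the old stub offset of 0x04000000.
constexpr u32 page_size_bits = 12;      // log2 of the 4 KiB small page
constexpr u64 va_start_page = 0x4000;   // hypothetical allocator vaStart
constexpr u64 va_limit_page = 0x403fbf; // hypothetical allocator vaLimit
constexpr u64 offset_bytes = va_start_page << page_size_bits; // 0x04000000
constexpr u64 page_count = va_limit_page - va_start_page;     // mappable pages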

NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlGetVaRegions params{};
std::memcpy(&params, input.data(), input.size());

LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
params.buf_size);
LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
params.buf_size);

params.buf_size = 0x30;
std::scoped_lock lock(mutex);

params.small = IoctlVaRegion{
.offset = 0x04000000,
.page_size = DEFAULT_SMALL_PAGE_SIZE,
.pages = 0x3fbfff,
};
if (!vm.initialised) {
return NvResult::BadValue;
}

params.big = IoctlVaRegion{
.offset = 0x04000000,
.page_size = big_page_size,
.pages = 0x1bffff,
};

// TODO(ogniK): This probably can stay stubbed but should add support way way later
GetVARegionsImpl(params);

std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
@@ -305,62 +511,27 @@ NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u
IoctlGetVaRegions params{};
std::memcpy(&params, input.data(), input.size());

LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
params.buf_size);
LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
params.buf_size);

params.buf_size = 0x30;
std::scoped_lock lock(mutex);

params.small = IoctlVaRegion{
.offset = 0x04000000,
.page_size = 0x1000,
.pages = 0x3fbfff,
};
if (!vm.initialised) {
return NvResult::BadValue;
}

params.big = IoctlVaRegion{
.offset = 0x04000000,
.page_size = big_page_size,
.pages = 0x1bffff,
};

// TODO(ogniK): This probably can stay stubbed but should add support way way later
GetVARegionsImpl(params);

std::memcpy(output.data(), &params, output.size());
std::memcpy(inline_output.data(), &params.small, sizeof(IoctlVaRegion));
std::memcpy(inline_output.data() + sizeof(IoctlVaRegion), &params.big, sizeof(IoctlVaRegion));
std::memcpy(inline_output.data(), &params.regions[0], sizeof(VaRegion));
std::memcpy(inline_output.data() + sizeof(VaRegion), &params.regions[1], sizeof(VaRegion));

return NvResult::Success;
}

std::optional<nvhost_as_gpu::BufferMap> nvhost_as_gpu::FindBufferMap(GPUVAddr gpu_addr) const {
const auto end{buffer_mappings.upper_bound(gpu_addr)};
for (auto iter{buffer_mappings.begin()}; iter != end; ++iter) {
if (gpu_addr >= iter->second.StartAddr() && gpu_addr < iter->second.EndAddr()) {
return iter->second;
}
}

return std::nullopt;
}

void nvhost_as_gpu::AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr,
bool is_allocated) {
buffer_mappings[gpu_addr] = {gpu_addr, size, cpu_addr, is_allocated};
}

std::optional<std::size_t> nvhost_as_gpu::RemoveBufferMap(GPUVAddr gpu_addr) {
if (const auto iter{buffer_mappings.find(gpu_addr)}; iter != buffer_mappings.end()) {
std::size_t size{};

if (iter->second.IsAllocated()) {
size = iter->second.Size();
}

buffer_mappings.erase(iter);

return size;
}

return std::nullopt;
Kernel::KEvent* nvhost_as_gpu::QueryEvent(u32 event_id) {
LOG_CRITICAL(Service_NVDRV, "Unknown AS GPU Event {}", event_id);
return nullptr;
}

} // namespace Service::Nvidia::Devices

@@ -1,35 +1,51 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#pragma once

#include <bit>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <optional>
#include <vector>

#include "common/address_space.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"

namespace Tegra {
class MemoryManager;
} // namespace Tegra

namespace Service::Nvidia {
class Module;
}

namespace Service::Nvidia::NvCore {
class Container;
class NvMap;
} // namespace Service::Nvidia::NvCore

namespace Service::Nvidia::Devices {

constexpr u32 DEFAULT_BIG_PAGE_SIZE = 1 << 16;
constexpr u32 DEFAULT_SMALL_PAGE_SIZE = 1 << 12;

class nvmap;

enum class AddressSpaceFlags : u32 {
None = 0x0,
FixedOffset = 0x1,
Remap = 0x100,
enum class MappingFlags : u32 {
None = 0,
Fixed = 1 << 0,
Sparse = 1 << 1,
Remap = 1 << 8,
};
DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags);
DECLARE_ENUM_FLAG_OPERATORS(MappingFlags);

class nvhost_as_gpu final : public nvdevice {
public:
explicit nvhost_as_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_);
explicit nvhost_as_gpu(Core::System& system_, Module& module, NvCore::Container& core);
~nvhost_as_gpu() override;

NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -42,46 +58,17 @@ public:
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;

private:
class BufferMap final {
public:
constexpr BufferMap() = default;
Kernel::KEvent* QueryEvent(u32 event_id) override;

constexpr BufferMap(GPUVAddr start_addr_, std::size_t size_)
: start_addr{start_addr_}, end_addr{start_addr_ + size_} {}

constexpr BufferMap(GPUVAddr start_addr_, std::size_t size_, VAddr cpu_addr_,
bool is_allocated_)
: start_addr{start_addr_}, end_addr{start_addr_ + size_}, cpu_addr{cpu_addr_},
is_allocated{is_allocated_} {}

constexpr VAddr StartAddr() const {
return start_addr;
}

constexpr VAddr EndAddr() const {
return end_addr;
}

constexpr std::size_t Size() const {
return end_addr - start_addr;
}

constexpr VAddr CpuAddr() const {
return cpu_addr;
}

constexpr bool IsAllocated() const {
return is_allocated;
}

private:
GPUVAddr start_addr{};
GPUVAddr end_addr{};
VAddr cpu_addr{};
bool is_allocated{};
struct VaRegion {
u64 offset;
u32 page_size;
u32 _pad0_;
u64 pages;
};
static_assert(sizeof(VaRegion) == 0x18);

private:
struct IoctlAllocAsEx {
u32_le flags{}; // usually passes 1
s32_le as_fd{}; // ignored; passes 0
@@ -96,7 +83,7 @@ private:
struct IoctlAllocSpace {
u32_le pages{};
u32_le page_size{};
AddressSpaceFlags flags{};
MappingFlags flags{};
INSERT_PADDING_WORDS(1);
union {
u64_le offset;
@@ -113,19 +100,19 @@ private:
static_assert(sizeof(IoctlFreeSpace) == 16, "IoctlFreeSpace is incorrect size");

struct IoctlRemapEntry {
u16_le flags{};
u16_le kind{};
u32_le nvmap_handle{};
u32_le map_offset{};
u32_le offset{};
u32_le pages{};
u16 flags;
u16 kind;
NvCore::NvMap::Handle::Id handle;
u32 handle_offset_big_pages;
u32 as_offset_big_pages;
u32 big_pages;
};
static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size");
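Note that the new IoctlRemapEntry expresses its offsets and length in big-page units rather than small pages. A sketch of the conversion a handler would perform, assuming the vm.big_page_size_bits field declared further below (the entry variable is hypothetical):

// Sketch only: translating big-page units into byte quantities.
// With the 128 KiB default big page, big_page_size_bits is 17.
const u64 handle_offset = u64{entry.handle_offset_big_pages} << vm.big_page_size_bits;
const u64 as_offset = u64{entry.as_offset_big_pages} << vm.big_page_size_bits;
const u64 size_bytes = u64{entry.big_pages} << vm.big_page_size_bits;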

struct IoctlMapBufferEx {
AddressSpaceFlags flags{}; // bit0: fixed_offset, bit2: cacheable
u32_le kind{}; // -1 is default
u32_le nvmap_handle{};
MappingFlags flags{}; // bit0: fixed_offset, bit2: cacheable
u32_le kind{}; // -1 is default
NvCore::NvMap::Handle::Id handle;
u32_le page_size{}; // 0 means don't care
s64_le buffer_offset{};
u64_le mapping_size{};
@@ -143,27 +130,15 @@ private:
};
static_assert(sizeof(IoctlBindChannel) == 4, "IoctlBindChannel is incorrect size");

struct IoctlVaRegion {
u64_le offset{};
u32_le page_size{};
INSERT_PADDING_WORDS(1);
u64_le pages{};
};
static_assert(sizeof(IoctlVaRegion) == 24, "IoctlVaRegion is incorrect size");

struct IoctlGetVaRegions {
u64_le buf_addr{}; // (contained output user ptr on linux, ignored)
u32_le buf_size{}; // forced to 2*sizeof(struct va_region)
u32_le reserved{};
IoctlVaRegion small{};
IoctlVaRegion big{};
std::array<VaRegion, 2> regions{};
};
static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(IoctlVaRegion) * 2,
static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(VaRegion) * 2,
"IoctlGetVaRegions is incorrect size");

s32 channel{};
u32 big_page_size{DEFAULT_BIG_PAGE_SIZE};

NvResult AllocAsEx(const std::vector<u8>& input, std::vector<u8>& output);
NvResult AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output);
NvResult Remap(const std::vector<u8>& input, std::vector<u8>& output);
@@ -172,18 +147,75 @@ private:
NvResult FreeSpace(const std::vector<u8>& input, std::vector<u8>& output);
NvResult BindChannel(const std::vector<u8>& input, std::vector<u8>& output);

void GetVARegionsImpl(IoctlGetVaRegions& params);
NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output);
NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output,
std::vector<u8>& inline_output);

std::optional<BufferMap> FindBufferMap(GPUVAddr gpu_addr) const;
void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated);
std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr);
void FreeMappingLocked(u64 offset);

std::shared_ptr<nvmap> nvmap_dev;
Module& module;

// This is expected to be ordered, therefore we must use a map, not unordered_map
std::map<GPUVAddr, BufferMap> buffer_mappings;
NvCore::Container& container;
NvCore::NvMap& nvmap;

struct Mapping {
VAddr ptr;
u64 offset;
u64 size;
bool fixed;
bool big_page; // Only valid if fixed == false
bool sparse_alloc;

Mapping(VAddr ptr_, u64 offset_, u64 size_, bool fixed_, bool big_page_, bool sparse_alloc_)
: ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), big_page(big_page_),
sparse_alloc(sparse_alloc_) {}
};

struct Allocation {
u64 size;
std::list<std::shared_ptr<Mapping>> mappings;
u32 page_size;
bool sparse;
bool big_pages;
};

std::map<u64, std::shared_ptr<Mapping>>
mapping_map; //!< This maps the base addresses of mapped buffers to their total sizes and
//!< mapping type, this is needed as what was originally a single buffer may
//!< have been split into multiple GPU side buffers with the remap flag.
std::map<u64, Allocation> allocation_map; //!< Holds allocations created by AllocSpace from
//!< which fixed buffers can be mapped into
std::mutex mutex; //!< Locks all AS operations
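Because allocation_map is ordered by base offset, the fixed-mapping path in MapBufferEx can locate the owning allocation with the usual upper_bound/step-back idiom. A self-contained sketch of that idiom, assuming offset and size come from the ioctl parameters:

// Sketch: find the Allocation that should contain [offset, offset + size).
auto it = allocation_map.upper_bound(offset); // first entry past offset
if (it == allocation_map.begin()) {
    // Nothing allocated at or below offset; a fixed mapping is invalid here.
} else {
    --it; // candidate allocation starting at or below offset
    const bool fits = (offset - it->first) + size <= it->second.size;
}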

struct VM {
static constexpr u32 PAGE_SIZE{0x1000};
static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(PAGE_SIZE)};

static constexpr u32 SUPPORTED_BIG_PAGE_SIZES{0x30000};
static constexpr u32 DEFAULT_BIG_PAGE_SIZE{0x20000};
u32 big_page_size{DEFAULT_BIG_PAGE_SIZE};
u32 big_page_size_bits{std::countr_zero(DEFAULT_BIG_PAGE_SIZE)};

static constexpr u32 VA_START_SHIFT{10};
static constexpr u64 DEFAULT_VA_SPLIT{1ULL << 34};
static constexpr u64 DEFAULT_VA_RANGE{1ULL << 37};
u64 va_range_start{DEFAULT_BIG_PAGE_SIZE << VA_START_SHIFT};
u64 va_range_split{DEFAULT_VA_SPLIT};
u64 va_range_end{DEFAULT_VA_RANGE};

using Allocator = Common::FlatAllocator<u32, 0, 32>;

std::unique_ptr<Allocator> big_page_allocator;
std::shared_ptr<Allocator>
small_page_allocator; //! Shared as this is also used by nvhost::GpuChannel

bool initialised{};
} vm;
std::shared_ptr<Tegra::MemoryManager> gmmu;

// s32 channel{};
// u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE};
};

} // namespace Service::Nvidia::Devices

@@ -1,24 +1,40 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#include <bit>
#include <cstdlib>
#include <cstring>

#include <fmt/format.h>
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl.h"
#include "video_core/gpu.h"
#include "video_core/host1x/host1x.h"

namespace Service::Nvidia::Devices {

nvhost_ctrl::nvhost_ctrl(Core::System& system_, EventInterface& events_interface_,
SyncpointManager& syncpoint_manager_)
: nvdevice{system_}, events_interface{events_interface_}, syncpoint_manager{
syncpoint_manager_} {}
nvhost_ctrl::~nvhost_ctrl() = default;
NvCore::Container& core_)
: nvdevice{system_}, events_interface{events_interface_}, core{core_},
syncpoint_manager{core_.GetSyncpointManager()} {}

nvhost_ctrl::~nvhost_ctrl() {
for (auto& event : events) {
if (!event.registered) {
continue;
}
events_interface.FreeEvent(event.kevent);
}
}

NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
std::vector<u8>& output) {
@@ -30,13 +46,15 @@ NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
case 0x1c:
return IocCtrlClearEventWait(input, output);
case 0x1d:
return IocCtrlEventWait(input, output, false);
case 0x1e:
return IocCtrlEventWait(input, output, true);
case 0x1e:
return IocCtrlEventWait(input, output, false);
case 0x1f:
return IocCtrlEventRegister(input, output);
case 0x20:
return IocCtrlEventUnregister(input, output);
case 0x21:
return IocCtrlEventUnregisterBatch(input, output);
}
break;
default:
@@ -60,6 +78,7 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>&
}

void nvhost_ctrl::OnOpen(DeviceFD fd) {}

void nvhost_ctrl::OnClose(DeviceFD fd) {}

NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -71,116 +90,167 @@ NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector
}

NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
bool is_async) {
bool is_allocation) {
IocCtrlEventWaitParams params{};
std::memcpy(&params, input.data(), sizeof(params));
LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_async={}",
params.syncpt_id, params.threshold, params.timeout, is_async);
LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_allocation={}",
params.fence.id, params.fence.value, params.timeout, is_allocation);

if (params.syncpt_id >= MaxSyncPoints) {
bool must_unmark_fail = !is_allocation;
const u32 event_id = params.value.raw;
SCOPE_EXIT({
std::memcpy(output.data(), &params, sizeof(params));
if (must_unmark_fail) {
events[event_id].fails = 0;
}
});

const u32 fence_id = static_cast<u32>(params.fence.id);

if (fence_id >= MaxSyncPoints) {
return NvResult::BadParameter;
}

u32 event_id = params.value & 0x00FF;
if (params.fence.value == 0) {
if (!syncpoint_manager.IsSyncpointAllocated(params.fence.id)) {
LOG_WARNING(Service_NVDRV,
"Unallocated syncpt_id={}, threshold={}, timeout={}, is_allocation={}",
params.fence.id, params.fence.value, params.timeout, is_allocation);
} else {
params.value.raw = syncpoint_manager.ReadSyncpointMinValue(fence_id);
}
return NvResult::Success;
}

if (event_id >= MaxNvEvents) {
std::memcpy(output.data(), &params, sizeof(params));
if (syncpoint_manager.IsFenceSignalled(params.fence)) {
params.value.raw = syncpoint_manager.ReadSyncpointMinValue(fence_id);
return NvResult::Success;
}

if (const auto new_value = syncpoint_manager.UpdateMin(fence_id);
syncpoint_manager.IsFenceSignalled(params.fence)) {
params.value.raw = new_value;
return NvResult::Success;
}

auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager();
const u32 target_value = params.fence.value;

auto lock = NvEventsLock();

u32 slot = [&]() {
if (is_allocation) {
params.value.raw = 0;
return FindFreeNvEvent(fence_id);
} else {
return params.value.raw;
}
}();

must_unmark_fail = false;

const auto check_failing = [&]() {
if (events[slot].fails > 2) {
{
auto lk = system.StallProcesses();
host1x_syncpoint_manager.WaitHost(fence_id, target_value);
system.UnstallProcesses();
}
params.value.raw = target_value;
return true;
}
return false;
};

if (slot >= MaxNvEvents) {
return NvResult::BadParameter;
}

if (syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) {
params.value = syncpoint_manager.GetSyncpointMin(params.syncpt_id);
std::memcpy(output.data(), &params, sizeof(params));
events_interface.failed[event_id] = false;
return NvResult::Success;
}

if (const auto new_value = syncpoint_manager.RefreshSyncpoint(params.syncpt_id);
syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) {
params.value = new_value;
std::memcpy(output.data(), &params, sizeof(params));
events_interface.failed[event_id] = false;
return NvResult::Success;
}

auto& event = events_interface.events[event_id];
auto& gpu = system.GPU();

// This is mostly to take into account unimplemented features. As synced
// gpu is always synced.
if (!gpu.IsAsync()) {
event.event->GetWritableEvent().Signal();
return NvResult::Success;
}
const u32 current_syncpoint_value = event.fence.value;
const s32 diff = current_syncpoint_value - params.threshold;
if (diff >= 0) {
event.event->GetWritableEvent().Signal();
params.value = current_syncpoint_value;
std::memcpy(output.data(), &params, sizeof(params));
events_interface.failed[event_id] = false;
return NvResult::Success;
}
const u32 target_value = current_syncpoint_value - diff;

if (!is_async) {
params.value = 0;
}

if (params.timeout == 0) {
std::memcpy(output.data(), &params, sizeof(params));
if (check_failing()) {
events[slot].fails = 0;
return NvResult::Success;
}
return NvResult::Timeout;
}

EventState status = events_interface.status[event_id];
const bool bad_parameter = status == EventState::Busy;
if (bad_parameter) {
std::memcpy(output.data(), &params, sizeof(params));
auto& event = events[slot];

if (!event.registered) {
return NvResult::BadParameter;
}
events_interface.SetEventStatus(event_id, EventState::Waiting);
events_interface.assigned_syncpt[event_id] = params.syncpt_id;
events_interface.assigned_value[event_id] = target_value;
if (is_async) {
params.value = params.syncpt_id << 4;
} else {
params.value = ((params.syncpt_id & 0xfff) << 16) | 0x10000000;

if (event.IsBeingUsed()) {
return NvResult::BadParameter;
}
params.value |= event_id;
event.event->GetWritableEvent().Clear();
if (events_interface.failed[event_id]) {
{
auto lk = system.StallProcesses();
gpu.WaitFence(params.syncpt_id, target_value);
system.UnstallProcesses();
}
std::memcpy(output.data(), &params, sizeof(params));
events_interface.failed[event_id] = false;

if (check_failing()) {
event.fails = 0;
return NvResult::Success;
}
gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value);
std::memcpy(output.data(), &params, sizeof(params));

params.value.raw = 0;

event.status.store(EventState::Waiting, std::memory_order_release);
event.assigned_syncpt = fence_id;
event.assigned_value = target_value;
if (is_allocation) {
params.value.syncpoint_id_for_allocation.Assign(static_cast<u16>(fence_id));
params.value.event_allocated.Assign(1);
} else {
params.value.syncpoint_id.Assign(fence_id);
}
params.value.raw |= slot;

event.wait_handle =
host1x_syncpoint_manager.RegisterHostAction(fence_id, target_value, [this, slot]() {
auto& event_ = events[slot];
if (event_.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
EventState::Waiting) {
event_.kevent->GetWritableEvent().Signal();
}
event_.status.store(EventState::Signalled, std::memory_order_release);
});
return NvResult::Timeout;
}
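One behaviour worth spelling out: a wait that cannot complete immediately returns Timeout, and each subsequent IocCtrlClearEventWait on that slot increments InternalEvent::fails. Once fails exceeds 2, check_failing() gives up on the event machinery and performs a blocking host-side wait with processes stalled, so a guest that keeps re-arming the same fence still makes forward progress. A compressed trace of that contract (names from the code above; not literal control flow):

// 1st and 2nd attempts: RegisterHostAction(...) -> NvResult::Timeout
// guest reacts: IocCtrlClearEventWait -> event.fails++
// later attempt with fails > 2: check_failing() ->
//     StallProcesses(); host1x_syncpoint_manager.WaitHost(fence_id, target_value);
//     UnstallProcesses(); params.value.raw = target_value -> NvResult::Success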

NvResult nvhost_ctrl::FreeEvent(u32 slot) {
if (slot >= MaxNvEvents) {
return NvResult::BadParameter;
}

auto& event = events[slot];

if (!event.registered) {
return NvResult::Success;
}

if (event.IsBeingUsed()) {
return NvResult::Busy;
}

FreeNvEvent(slot);
return NvResult::Success;
}

NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output) {
IocCtrlEventRegisterParams params{};
std::memcpy(&params, input.data(), sizeof(params));
const u32 event_id = params.user_event_id & 0x00FF;
const u32 event_id = params.user_event_id;
LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
if (event_id >= MaxNvEvents) {
return NvResult::BadParameter;
}
if (events_interface.registered[event_id]) {
const auto event_state = events_interface.status[event_id];
if (event_state != EventState::Free) {
LOG_WARNING(Service_NVDRV, "Event already registered! Unregistering previous event");
events_interface.UnregisterEvent(event_id);
} else {
return NvResult::BadParameter;

auto lock = NvEventsLock();

if (events[event_id].registered) {
const auto result = FreeEvent(event_id);
if (result != NvResult::Success) {
return result;
}
}
events_interface.RegisterEvent(event_id);
CreateNvEvent(event_id);
return NvResult::Success;
}

@@ -190,34 +260,142 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(const std::vector<u8>& input,
std::memcpy(&params, input.data(), sizeof(params));
const u32 event_id = params.user_event_id & 0x00FF;
LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
if (event_id >= MaxNvEvents) {
return NvResult::BadParameter;

auto lock = NvEventsLock();
return FreeEvent(event_id);
}

NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(const std::vector<u8>& input,
std::vector<u8>& output) {
IocCtrlEventUnregisterBatchParams params{};
std::memcpy(&params, input.data(), sizeof(params));
u64 event_mask = params.user_events;
LOG_DEBUG(Service_NVDRV, " called, event_mask: {:X}", event_mask);

auto lock = NvEventsLock();
while (event_mask != 0) {
const u64 event_id = std::countr_zero(event_mask);
event_mask &= ~(1ULL << event_id);
const auto result = FreeEvent(static_cast<u32>(event_id));
if (result != NvResult::Success) {
return result;
}
}
if (!events_interface.registered[event_id]) {
return NvResult::BadParameter;
}
events_interface.UnregisterEvent(event_id);
return NvResult::Success;
}

NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output) {
IocCtrlEventSignalParams params{};
IocCtrlEventClearParams params{};
std::memcpy(&params, input.data(), sizeof(params));

u32 event_id = params.event_id & 0x00FF;
LOG_WARNING(Service_NVDRV, "cleared event wait on, event_id: {:X}", event_id);
u32 event_id = params.event_id.slot;
LOG_DEBUG(Service_NVDRV, "called, event_id: {:X}", event_id);

if (event_id >= MaxNvEvents) {
return NvResult::BadParameter;
}
if (events_interface.status[event_id] == EventState::Waiting) {
events_interface.LiberateEvent(event_id);
}
events_interface.failed[event_id] = true;

syncpoint_manager.RefreshSyncpoint(events_interface.events[event_id].fence.id);
auto lock = NvEventsLock();

auto& event = events[event_id];
if (event.status.exchange(EventState::Cancelling, std::memory_order_acq_rel) ==
EventState::Waiting) {
auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager();
host1x_syncpoint_manager.DeregisterHostAction(event.assigned_syncpt, event.wait_handle);
syncpoint_manager.UpdateMin(event.assigned_syncpt);
event.wait_handle = {};
}
event.fails++;
event.status.store(EventState::Cancelled, std::memory_order_release);
event.kevent->GetWritableEvent().Clear();

return NvResult::Success;
}

Kernel::KEvent* nvhost_ctrl::QueryEvent(u32 event_id) {
const auto desired_event = SyncpointEventValue{.raw = event_id};

const bool allocated = desired_event.event_allocated.Value() != 0;
const u32 slot{allocated ? desired_event.partial_slot.Value()
: static_cast<u32>(desired_event.slot)};
if (slot >= MaxNvEvents) {
ASSERT(false);
return nullptr;
}

const u32 syncpoint_id{allocated ? desired_event.syncpoint_id_for_allocation.Value()
: desired_event.syncpoint_id.Value()};

auto lock = NvEventsLock();

auto& event = events[slot];
if (event.registered && event.assigned_syncpt == syncpoint_id) {
ASSERT(event.kevent);
return event.kevent;
}
// Is this possible in hardware?
ASSERT_MSG(false, "Slot:{}, SyncpointID:{}, requested", slot, syncpoint_id);
return nullptr;
}

std::unique_lock<std::mutex> nvhost_ctrl::NvEventsLock() {
return std::unique_lock<std::mutex>(events_mutex);
}

void nvhost_ctrl::CreateNvEvent(u32 event_id) {
auto& event = events[event_id];
ASSERT(!event.kevent);
ASSERT(!event.registered);
ASSERT(!event.IsBeingUsed());
event.kevent = events_interface.CreateEvent(fmt::format("NVCTRL::NvEvent_{}", event_id));
event.status = EventState::Available;
event.registered = true;
const u64 mask = 1ULL << event_id;
event.fails = 0;
events_mask |= mask;
event.assigned_syncpt = 0;
}

void nvhost_ctrl::FreeNvEvent(u32 event_id) {
auto& event = events[event_id];
ASSERT(event.kevent);
ASSERT(event.registered);
ASSERT(!event.IsBeingUsed());
events_interface.FreeEvent(event.kevent);
event.kevent = nullptr;
event.status = EventState::Available;
event.registered = false;
const u64 mask = ~(1ULL << event_id);
events_mask &= mask;
}

u32 nvhost_ctrl::FindFreeNvEvent(u32 syncpoint_id) {
u32 slot{MaxNvEvents};
u32 free_slot{MaxNvEvents};
for (u32 i = 0; i < MaxNvEvents; i++) {
auto& event = events[i];
if (event.registered) {
if (!event.IsBeingUsed()) {
slot = i;
if (event.assigned_syncpt == syncpoint_id) {
return slot;
}
}
} else if (free_slot == MaxNvEvents) {
free_slot = i;
}
}
if (free_slot < MaxNvEvents) {
CreateNvEvent(free_slot);
return free_slot;
}

if (slot < MaxNvEvents) {
return slot;
}

LOG_CRITICAL(Service_NVDRV, "Failed to allocate an event");
return 0;
}
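FindFreeNvEvent's preference order is easy to miss in the loop above; in outline:

// 1. A registered, idle slot already bound to syncpoint_id -> returned immediately.
// 2. Otherwise the first never-registered slot -> CreateNvEvent() and returned.
// 3. Otherwise any registered idle slot -> returned for rebinding.
// 4. Otherwise every slot is busy -> LOG_CRITICAL and slot 0 is returned.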

} // namespace Service::Nvidia::Devices

@@ -1,20 +1,29 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#pragma once

#include <array>
#include <vector>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
#include "core/hle/service/nvdrv/nvdrv.h"
#include "video_core/host1x/syncpoint_manager.h"

namespace Service::Nvidia::NvCore {
class Container;
class SyncpointManager;
} // namespace Service::Nvidia::NvCore

namespace Service::Nvidia::Devices {

class nvhost_ctrl final : public nvdevice {
public:
explicit nvhost_ctrl(Core::System& system_, EventInterface& events_interface_,
SyncpointManager& syncpoint_manager_);
NvCore::Container& core);
~nvhost_ctrl() override;

NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -27,7 +36,70 @@ public:
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;

Kernel::KEvent* QueryEvent(u32 event_id) override;

union SyncpointEventValue {
u32 raw;

union {
BitField<0, 4, u32> partial_slot;
BitField<4, 28, u32> syncpoint_id;
};

struct {
u16 slot;
union {
BitField<0, 12, u16> syncpoint_id_for_allocation;
BitField<12, 1, u16> event_allocated;
};
};
};
static_assert(sizeof(SyncpointEventValue) == sizeof(u32));
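Read alongside IocCtrlEventWait and QueryEvent, the union encodes an event handle in two ways. A sketch derived from the BitField positions above (illustrative packing code, not part of the driver):

// Non-allocated wait: syncpoint id occupies bits 4..31, slot bits 0..3.
u32 packed = (syncpoint_id << 4) | (slot & 0xF);
// Allocated wait: slot in bits 0..15, syncpoint id in bits 16..27, and
// bit 28 marks the event as allocated (the old code's 0x10000000 constant).
u32 packed_alloc = (1u << 28) | ((syncpoint_id & 0xFFF) << 16) | (slot & 0xFFFF);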

private:
struct InternalEvent {
// Mask representing registered events

// Each kernel event associated to an NV event
Kernel::KEvent* kevent{};
// The status of the current NVEvent
std::atomic<EventState> status{};

// Tells the NVEvent that it has failed.
u32 fails{};
// When an NVEvent is waiting on GPU interrupt, this is the sync_point
// associated with it.
u32 assigned_syncpt{};
// This is the value of the GPU interrupt for which the NVEvent is waiting
// for.
u32 assigned_value{};

// Tells if an NVEvent is registered or not
bool registered{};

// Used for waiting on a syncpoint & canceling it.
Tegra::Host1x::SyncpointManager::ActionHandle wait_handle{};

bool IsBeingUsed() {
const auto current_status = status.load(std::memory_order_acquire);
return current_status == EventState::Waiting ||
current_status == EventState::Cancelling ||
current_status == EventState::Signalling;
}
};

std::unique_lock<std::mutex> NvEventsLock();

void CreateNvEvent(u32 event_id);

void FreeNvEvent(u32 event_id);

u32 FindFreeNvEvent(u32 syncpoint_id);

std::array<InternalEvent, MaxNvEvents> events{};
std::mutex events_mutex;
u64 events_mask{};

struct IocSyncptReadParams {
u32_le id{};
u32_le value{};
@@ -83,27 +155,18 @@ private:
};
static_assert(sizeof(IocGetConfigParams) == 387, "IocGetConfigParams is incorrect size");

struct IocCtrlEventSignalParams {
u32_le event_id{};
struct IocCtrlEventClearParams {
SyncpointEventValue event_id{};
};
static_assert(sizeof(IocCtrlEventSignalParams) == 4,
"IocCtrlEventSignalParams is incorrect size");
static_assert(sizeof(IocCtrlEventClearParams) == 4,
"IocCtrlEventClearParams is incorrect size");

struct IocCtrlEventWaitParams {
u32_le syncpt_id{};
u32_le threshold{};
s32_le timeout{};
u32_le value{};
};
static_assert(sizeof(IocCtrlEventWaitParams) == 16, "IocCtrlEventWaitParams is incorrect size");

struct IocCtrlEventWaitAsyncParams {
u32_le syncpt_id{};
u32_le threshold{};
NvFence fence{};
u32_le timeout{};
u32_le value{};
SyncpointEventValue value{};
};
static_assert(sizeof(IocCtrlEventWaitAsyncParams) == 16,
static_assert(sizeof(IocCtrlEventWaitParams) == 16,
"IocCtrlEventWaitAsyncParams is incorrect size");

struct IocCtrlEventRegisterParams {
@@ -118,19 +181,25 @@ private:
static_assert(sizeof(IocCtrlEventUnregisterParams) == 4,
"IocCtrlEventUnregisterParams is incorrect size");

struct IocCtrlEventKill {
struct IocCtrlEventUnregisterBatchParams {
u64_le user_events{};
};
static_assert(sizeof(IocCtrlEventKill) == 8, "IocCtrlEventKill is incorrect size");
static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8,
"IocCtrlEventKill is incorrect size");

NvResult NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output);
NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, bool is_async);
NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
bool is_allocation);
NvResult IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output);
NvResult IocCtrlEventUnregister(const std::vector<u8>& input, std::vector<u8>& output);
NvResult IocCtrlEventUnregisterBatch(const std::vector<u8>& input, std::vector<u8>& output);
NvResult IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output);

NvResult FreeEvent(u32 slot);

EventInterface& events_interface;
SyncpointManager& syncpoint_manager;
NvCore::Container& core;
NvCore::SyncpointManager& syncpoint_manager;
};

} // namespace Service::Nvidia::Devices

@@ -7,11 +7,19 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"
#include "core/hle/service/nvdrv/nvdrv.h"

namespace Service::Nvidia::Devices {

nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_) : nvdevice{system_} {}
nvhost_ctrl_gpu::~nvhost_ctrl_gpu() = default;
nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_)
: nvdevice{system_}, events_interface{events_interface_} {
error_notifier_event = events_interface.CreateEvent("CtrlGpuErrorNotifier");
unknown_event = events_interface.CreateEvent("CtrlGpuUknownEvent");
}
nvhost_ctrl_gpu::~nvhost_ctrl_gpu() {
events_interface.FreeEvent(error_notifier_event);
events_interface.FreeEvent(unknown_event);
}

NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
std::vector<u8>& output) {
@@ -286,4 +294,17 @@ NvResult nvhost_ctrl_gpu::GetGpuTime(const std::vector<u8>& input, std::vector<u
return NvResult::Success;
}

Kernel::KEvent* nvhost_ctrl_gpu::QueryEvent(u32 event_id) {
switch (event_id) {
case 1:
return error_notifier_event;
case 2:
return unknown_event;
default: {
LOG_CRITICAL(Service_NVDRV, "Unknown Ctrl GPU Event {}", event_id);
}
}
return nullptr;
}

} // namespace Service::Nvidia::Devices

@@ -10,11 +10,15 @@
#include "common/swap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"

namespace Service::Nvidia {
class EventInterface;
}

namespace Service::Nvidia::Devices {

class nvhost_ctrl_gpu final : public nvdevice {
public:
explicit nvhost_ctrl_gpu(Core::System& system_);
explicit nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_);
~nvhost_ctrl_gpu() override;

NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -27,6 +31,8 @@ public:
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;

Kernel::KEvent* QueryEvent(u32 event_id) override;

private:
struct IoctlGpuCharacteristics {
u32_le arch; // 0x120 (NVGPU_GPU_ARCH_GM200)
@@ -160,6 +166,12 @@ private:
NvResult ZBCQueryTable(const std::vector<u8>& input, std::vector<u8>& output);
NvResult FlushL2(const std::vector<u8>& input, std::vector<u8>& output);
NvResult GetGpuTime(const std::vector<u8>& input, std::vector<u8>& output);

EventInterface& events_interface;

// Events
Kernel::KEvent* error_notifier_event;
Kernel::KEvent* unknown_event;
};

} // namespace Service::Nvidia::Devices

@@ -5,29 +5,46 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
#include "core/hle/service/nvdrv/syncpoint_manager.h"
#include "core/hle/service/nvdrv/nvdrv.h"
#include "core/memory.h"
#include "video_core/control/channel_state.h"
#include "video_core/engines/puller.h"
#include "video_core/gpu.h"
#include "video_core/host1x/host1x.h"

namespace Service::Nvidia::Devices {
namespace {
Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoint_id) {
Tegra::GPU::FenceAction result{};
Tegra::CommandHeader BuildFenceAction(Tegra::Engines::Puller::FenceOperation op, u32 syncpoint_id) {
Tegra::Engines::Puller::FenceAction result{};
result.op.Assign(op);
result.syncpoint_id.Assign(syncpoint_id);
return {result.raw};
}
} // namespace

nvhost_gpu::nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
SyncpointManager& syncpoint_manager_)
: nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} {
channel_fence.id = syncpoint_manager_.AllocateSyncpoint();
channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id);
nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
NvCore::Container& core_)
: nvdevice{system_}, events_interface{events_interface_}, core{core_},
syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()},
channel_state{system.GPU().AllocateChannel()} {
channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
sm_exception_breakpoint_int_report_event =
events_interface.CreateEvent("GpuChannelSMExceptionBreakpointInt");
sm_exception_breakpoint_pause_report_event =
events_interface.CreateEvent("GpuChannelSMExceptionBreakpointPause");
error_notifier_event = events_interface.CreateEvent("GpuChannelErrorNotifier");
}

nvhost_gpu::~nvhost_gpu() = default;
nvhost_gpu::~nvhost_gpu() {
events_interface.FreeEvent(sm_exception_breakpoint_int_report_event);
events_interface.FreeEvent(sm_exception_breakpoint_pause_report_event);
events_interface.FreeEvent(error_notifier_event);
syncpoint_manager.FreeSyncpoint(channel_syncpoint);
}

NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
std::vector<u8>& output) {
@@ -167,9 +184,14 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8
params.num_entries, params.flags, params.unk0, params.unk1, params.unk2,
params.unk3);

channel_fence.value = system.GPU().GetSyncpointValue(channel_fence.id);
if (channel_state->initiated) {
LOG_CRITICAL(Service_NVDRV, "Already allocated!");
return NvResult::AlreadyAllocated;
}

params.fence_out = channel_fence;
system.GPU().InitChannel(*channel_state);

params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint);

std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
@@ -188,39 +210,37 @@ NvResult nvhost_gpu::AllocateObjectContext(const std::vector<u8>& input, std::ve

static std::vector<Tegra::CommandHeader> BuildWaitCommandList(NvFence fence) {
return {
Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1,
Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1,
Tegra::SubmissionMode::Increasing),
{fence.value},
Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1,
Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1,
Tegra::SubmissionMode::Increasing),
BuildFenceAction(Tegra::GPU::FenceOperation::Acquire, fence.id),
BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Acquire, fence.id),
};
}

static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(NvFence fence,
u32 add_increment) {
static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(NvFence fence) {
std::vector<Tegra::CommandHeader> result{
Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1,
Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1,
Tegra::SubmissionMode::Increasing),
{}};

for (u32 count = 0; count < add_increment; ++count) {
result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1,
for (u32 count = 0; count < 2; ++count) {
result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1,
Tegra::SubmissionMode::Increasing));
result.emplace_back(BuildFenceAction(Tegra::GPU::FenceOperation::Increment, fence.id));
result.emplace_back(
BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Increment, fence.id));
}

return result;
}

static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(NvFence fence,
u32 add_increment) {
static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(NvFence fence) {
std::vector<Tegra::CommandHeader> result{
Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForInterrupt, 1,
Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForIdle, 1,
Tegra::SubmissionMode::Increasing),
{}};
const std::vector<Tegra::CommandHeader> increment{
BuildIncrementCommandList(fence, add_increment)};
const std::vector<Tegra::CommandHeader> increment{BuildIncrementCommandList(fence)};

result.insert(result.end(), increment.begin(), increment.end());

@@ -234,33 +254,41 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector<u8>

auto& gpu = system.GPU();

params.fence_out.id = channel_fence.id;
std::scoped_lock lock(channel_mutex);

if (params.flags.add_wait.Value() &&
!syncpoint_manager.IsSyncpointExpired(params.fence_out.id, params.fence_out.value)) {
gpu.PushGPUEntries(Tegra::CommandList{BuildWaitCommandList(params.fence_out)});
}
const auto bind_id = channel_state->bind_id;

if (params.flags.add_increment.Value() || params.flags.increment.Value()) {
const u32 increment_value = params.flags.increment.Value() ? params.fence_out.value : 0;
params.fence_out.value = syncpoint_manager.IncreaseSyncpoint(
params.fence_out.id, params.AddIncrementValue() + increment_value);
} else {
params.fence_out.value = syncpoint_manager.GetSyncpointMax(params.fence_out.id);
}
auto& flags = params.flags;

gpu.PushGPUEntries(std::move(entries));
if (flags.fence_wait.Value()) {
if (flags.increment_value.Value()) {
return NvResult::BadParameter;
}

if (params.flags.add_increment.Value()) {
if (params.flags.suppress_wfi) {
gpu.PushGPUEntries(Tegra::CommandList{
BuildIncrementCommandList(params.fence_out, params.AddIncrementValue())});
} else {
gpu.PushGPUEntries(Tegra::CommandList{
BuildIncrementWithWfiCommandList(params.fence_out, params.AddIncrementValue())});
if (!syncpoint_manager.IsFenceSignalled(params.fence)) {
gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildWaitCommandList(params.fence)});
}
}

gpu.PushGPUEntries(bind_id, std::move(entries));
params.fence.id = channel_syncpoint;

u32 increment{(flags.fence_increment.Value() != 0 ? 2 : 0) +
(flags.increment_value.Value() != 0 ? params.fence.value : 0)};
params.fence.value = syncpoint_manager.IncrementSyncpointMaxExt(channel_syncpoint, increment);

if (flags.fence_increment.Value()) {
if (flags.suppress_wfi.Value()) {
gpu.PushGPUEntries(bind_id,
Tegra::CommandList{BuildIncrementCommandList(params.fence)});
} else {
gpu.PushGPUEntries(bind_id,
Tegra::CommandList{BuildIncrementWithWfiCommandList(params.fence)});
}
}

flags.raw = 0;

std::memcpy(output.data(), &params, sizeof(IoctlSubmitGpfifo));
return NvResult::Success;
}
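The increment computation above deserves a note: BuildIncrementCommandList now always queues two syncpoint increments, so a plain fence_increment submission advances the syncpoint maximum by 2, and increment_value additionally folds the caller-supplied fence.value into the reservation. A worked example, assuming IncrementSyncpointMaxExt returns the new maximum:

// fence_increment=1, increment_value=0, current max = 100:
//     increment = 2 + 0 = 2   -> params.fence.value = 102
// fence_increment=1, increment_value=1, caller's fence.value = 5, max = 102:
//     increment = 2 + 5 = 7   -> params.fence.value = 109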
@@ -328,4 +356,19 @@ NvResult nvhost_gpu::ChannelSetTimeslice(const std::vector<u8>& input, std::vect
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
Kernel::KEvent* nvhost_gpu::QueryEvent(u32 event_id) {
|
||||
switch (event_id) {
|
||||
case 1:
|
||||
return sm_exception_breakpoint_int_report_event;
|
||||
case 2:
|
||||
return sm_exception_breakpoint_pause_report_event;
|
||||
case 3:
|
||||
return error_notifier_event;
|
||||
default: {
|
||||
LOG_CRITICAL(Service_NVDRV, "Unknown Ctrl GPU Event {}", event_id);
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
||||
|
@@ -13,17 +13,31 @@
|
||||
#include "core/hle/service/nvdrv/nvdata.h"
|
||||
#include "video_core/dma_pusher.h"
|
||||
|
||||
namespace Service::Nvidia {
|
||||
class SyncpointManager;
|
||||
namespace Tegra {
|
||||
namespace Control {
|
||||
struct ChannelState;
|
||||
}
|
||||
} // namespace Tegra
|
||||
|
||||
namespace Service::Nvidia {
|
||||
|
||||
namespace NvCore {
|
||||
class Container;
|
||||
class NvMap;
|
||||
class SyncpointManager;
|
||||
} // namespace NvCore
|
||||
|
||||
class EventInterface;
|
||||
} // namespace Service::Nvidia
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
class nvhost_as_gpu;
|
||||
class nvmap;
|
||||
class nvhost_gpu final : public nvdevice {
|
||||
public:
|
||||
explicit nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
|
||||
SyncpointManager& syncpoint_manager_);
|
||||
explicit nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
|
||||
NvCore::Container& core);
|
||||
~nvhost_gpu() override;
|
||||
|
||||
NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
|
||||
@@ -36,7 +50,10 @@ public:
|
||||
void OnOpen(DeviceFD fd) override;
|
||||
void OnClose(DeviceFD fd) override;
|
||||
|
||||
Kernel::KEvent* QueryEvent(u32 event_id) override;
|
||||
|
||||
private:
|
||||
friend class nvhost_as_gpu;
|
||||
enum class CtxObjects : u32_le {
|
||||
Ctx2D = 0x902D,
|
||||
Ctx3D = 0xB197,
|
||||
@@ -146,17 +163,13 @@ private:
|
||||
u32_le num_entries{}; // number of fence objects being submitted
|
||||
union {
|
||||
u32_le raw;
|
||||
BitField<0, 1, u32_le> add_wait; // append a wait sync_point to the list
|
||||
BitField<1, 1, u32_le> add_increment; // append an increment to the list
|
||||
BitField<2, 1, u32_le> new_hw_format; // mostly ignored
|
||||
BitField<4, 1, u32_le> suppress_wfi; // suppress wait for interrupt
|
||||
BitField<8, 1, u32_le> increment; // increment the returned fence
|
||||
BitField<0, 1, u32_le> fence_wait; // append a wait sync_point to the list
|
||||
BitField<1, 1, u32_le> fence_increment; // append an increment to the list
|
||||
BitField<2, 1, u32_le> new_hw_format; // mostly ignored
|
||||
BitField<4, 1, u32_le> suppress_wfi; // suppress wait for interrupt
|
||||
BitField<8, 1, u32_le> increment_value; // increment the returned fence
|
||||
} flags;
|
||||
NvFence fence_out{}; // returned new fence object for others to wait on
|
||||
|
||||
u32 AddIncrementValue() const {
|
||||
return flags.add_increment.Value() << 1;
|
||||
}
|
||||
NvFence fence{}; // returned new fence object for others to wait on
|
||||
};
|
||||
static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(NvFence),
|
||||
"IoctlSubmitGpfifo is incorrect size");
@@ -191,9 +204,18 @@ private:
    NvResult ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output);

    std::shared_ptr<nvmap> nvmap_dev;
    SyncpointManager& syncpoint_manager;
    NvFence channel_fence;
    EventInterface& events_interface;
    NvCore::Container& core;
    NvCore::SyncpointManager& syncpoint_manager;
    NvCore::NvMap& nvmap;
    std::shared_ptr<Tegra::Control::ChannelState> channel_state;
    u32 channel_syncpoint;
    std::mutex channel_mutex;

    // Events
    Kernel::KEvent* sm_exception_breakpoint_int_report_event;
    Kernel::KEvent* sm_exception_breakpoint_pause_report_event;
    Kernel::KEvent* error_notifier_event;
};

} // namespace Service::Nvidia::Devices

@@ -9,9 +9,10 @@

namespace Service::Nvidia::Devices {

nvhost_nvdec::nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
                           SyncpointManager& syncpoint_manager_)
    : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {}
u32 nvhost_nvdec::next_id{};

nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_)
    : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {}
nvhost_nvdec::~nvhost_nvdec() = default;

NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,

@@ -10,8 +10,7 @@ namespace Service::Nvidia::Devices {

class nvhost_nvdec final : public nvhost_nvdec_common {
public:
    explicit nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
                          SyncpointManager& syncpoint_manager_);
    explicit nvhost_nvdec(Core::System& system_, NvCore::Container& core);
    ~nvhost_nvdec() override;

    NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -25,7 +24,7 @@ public:
    void OnClose(DeviceFD fd) override;

private:
    u32 next_id{};
    static u32 next_id;
};

} // namespace Service::Nvidia::Devices
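Note the move from a per-instance next_id member to a static one: with the new open-time factories, every open() constructs a fresh device object, so state that must survive across opens (like the decoder id counter) has to become static. A tiny standalone illustration of the difference (hypothetical names):

// Sketch: a per-instance counter restarts with every new object; a static
// counter is shared by all instances of the class.
#include <iostream>

struct PerInstance {
    unsigned next_id{};
    unsigned Take() { return next_id++; }
};

struct Shared {
    static unsigned next_id; // one counter for the whole process
    unsigned Take() { return next_id++; }
};
unsigned Shared::next_id{};

int main() {
    PerInstance a, b;
    std::cout << a.Take() << b.Take() << '\n'; // "00": each instance starts over
    Shared c, d;
    std::cout << c.Take() << d.Take() << '\n'; // "01": the counter keeps advancing
}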
@@ -8,10 +8,12 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/hle/service/nvdrv/syncpoint_manager.h"
#include "core/memory.h"
#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_base.h"

@@ -44,10 +46,17 @@ std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::s
}
} // Anonymous namespace

nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
                                         SyncpointManager& syncpoint_manager_)
    : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} {}
nvhost_nvdec_common::~nvhost_nvdec_common() = default;
std::unordered_map<DeviceFD, u32> nvhost_nvdec_common::fd_to_id{};

nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_,
                                         NvCore::ChannelType channel_type_)
    : nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()},
      nvmap{core.GetNvMapFile()}, channel_type{channel_type_} {
    channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
}
nvhost_nvdec_common::~nvhost_nvdec_common() {
    syncpoint_manager.FreeSyncpoint(channel_syncpoint);
}
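The new constructor/destructor pair gives each channel its own syncpoint for its whole lifetime: allocated in the constructor, returned in the destructor. A minimal sketch of that allocate/free contract (an illustrative pool, not NvCore's SyncpointManager):

// Sketch: a fixed pool of syncpoints with allocate/free, plus a channel that
// owns one syncpoint RAII-style, mirroring the ctor/dtor above.
#include <bitset>
#include <cstdint>
#include <stdexcept>

class SyncpointPool {
public:
    std::uint32_t Allocate() {
        for (std::uint32_t id = 1; id < in_use.size(); ++id) { // id 0 stays reserved
            if (!in_use.test(id)) {
                in_use.set(id);
                return id;
            }
        }
        throw std::runtime_error("out of syncpoints");
    }
    void Free(std::uint32_t id) {
        in_use.reset(id);
    }

private:
    std::bitset<192> in_use; // Host1x exposes a fixed number of syncpoints
};

struct Channel {
    explicit Channel(SyncpointPool& pool_) : pool{pool_}, syncpoint{pool.Allocate()} {}
    ~Channel() {
        pool.Free(syncpoint);
    }
    SyncpointPool& pool;
    std::uint32_t syncpoint;
};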
NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
    IoctlSetNvmapFD params{};
@@ -84,14 +93,14 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input,
    for (std::size_t i = 0; i < syncpt_increments.size(); i++) {
        const SyncptIncr& syncpt_incr = syncpt_increments[i];
        fence_thresholds[i] =
            syncpoint_manager.IncreaseSyncpoint(syncpt_incr.id, syncpt_incr.increments);
            syncpoint_manager.IncrementSyncpointMaxExt(syncpt_incr.id, syncpt_incr.increments);
    }
}
for (const auto& cmd_buffer : command_buffers) {
    const auto object = nvmap_dev->GetObject(cmd_buffer.memory_id);
    const auto object = nvmap.GetHandle(cmd_buffer.memory_id);
    ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
    Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
    system.Memory().ReadBlock(object->addr + cmd_buffer.offset, cmdlist.data(),
    system.Memory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
                              cmdlist.size() * sizeof(u32));
    gpu.PushCommandBuffer(fd_to_id[fd], cmdlist);
}
@@ -112,10 +121,8 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::ve
    std::memcpy(&params, input.data(), sizeof(IoctlGetSyncpoint));
    LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param);

    if (device_syncpoints[params.param] == 0 && system.GPU().UseNvdec()) {
        device_syncpoints[params.param] = syncpoint_manager.AllocateSyncpoint();
    }
    params.value = device_syncpoints[params.param];
    // const u32 id{NvCore::SyncpointManager::channel_syncpoints[static_cast<u32>(channel_type)]};
    params.value = channel_syncpoint;
    std::memcpy(output.data(), &params, sizeof(IoctlGetSyncpoint));

    return NvResult::Success;
@@ -123,6 +130,7 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::ve

NvResult nvhost_nvdec_common::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlGetWaitbase params{};
    LOG_CRITICAL(Service_NVDRV, "called WAITBASE");
    std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
    params.value = 0; // Seems to be hardcoded at 0
    std::memcpy(output.data(), &params, sizeof(IoctlGetWaitbase));
@@ -136,28 +144,8 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto

    SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));

    auto& gpu = system.GPU();

    for (auto& cmd_buffer : cmd_buffer_handles) {
        auto object{nvmap_dev->GetObject(cmd_buffer.map_handle)};
        if (!object) {
            LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmd_buffer.map_handle);
            std::memcpy(output.data(), &params, output.size());
            return NvResult::InvalidState;
        }
        if (object->dma_map_addr == 0) {
            // NVDEC and VIC memory is in the 32-bit address space
            // MapAllocate32 will attempt to map a lower 32-bit value in the shared gpu memory space
            const GPUVAddr low_addr = gpu.MemoryManager().MapAllocate32(object->addr, object->size);
            object->dma_map_addr = static_cast<u32>(low_addr);
            // Ensure that the dma_map_addr is indeed in the lower 32-bit address space.
            ASSERT(object->dma_map_addr == low_addr);
        }
        if (!object->dma_map_addr) {
            LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size);
        } else {
            cmd_buffer.map_address = object->dma_map_addr;
        }
        cmd_buffer.map_address = nvmap.PinHandle(cmd_buffer.map_handle);
    }
    std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer));
    std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(),
@@ -167,11 +155,16 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
}

NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
    // This is intentionally stubbed.
    // Skip unmapping buffers here, so as not to break the continuity of the VP9 reference frame
    // addresses, and risk invalidating data before the async GPU thread is done with it
    IoctlMapBuffer params{};
    std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
    std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);

    SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
    for (auto& cmd_buffer : cmd_buffer_handles) {
        nvmap.UnpinHandle(cmd_buffer.map_handle);
    }

    std::memset(output.data(), 0, output.size());
    LOG_DEBUG(Service_NVDRV, "(STUBBED) called");
    return NvResult::Success;
}
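MapBuffer and UnmapBuffer now defer to PinHandle/UnpinHandle instead of tracking a dma_map_addr on each object. A sketch of the pin-count idea under assumed semantics (the first pin maps and assigns an address, the last unpin releases it; the address allocator is a stand-in):

// Sketch: refcounted pinning. Repeated pins return the same address; the
// mapping goes away only when the pin count drops back to zero.
#include <cstdint>
#include <unordered_map>

class PinTracker {
public:
    std::uint32_t Pin(std::uint32_t handle) {
        auto& entry = pins[handle];
        if (entry.count++ == 0) {
            entry.address = next_address; // stand-in for a real 32-bit GPU mapping
            next_address += 0x1000;
        }
        return entry.address;
    }

    void Unpin(std::uint32_t handle) {
        const auto it = pins.find(handle);
        if (it != pins.end() && --it->second.count == 0) {
            pins.erase(it); // the address would be handed back to the allocator here
        }
    }

private:
    struct Entry {
        std::uint32_t count{};
        std::uint32_t address{};
    };
    std::unordered_map<std::uint32_t, Entry> pins;
    std::uint32_t next_address = 0x1000;
};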
@@ -182,4 +175,13 @@ NvResult nvhost_nvdec_common::SetSubmitTimeout(const std::vector<u8>& input,
    return NvResult::Success;
}

Kernel::KEvent* nvhost_nvdec_common::QueryEvent(u32 event_id) {
    LOG_CRITICAL(Service_NVDRV, "Unknown HOST1X Event {}", event_id);
    return nullptr;
}

void nvhost_nvdec_common::Reset() {
    fd_to_id.clear();
}

} // namespace Service::Nvidia::Devices

@@ -6,20 +6,26 @@
#include <vector>
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"

namespace Service::Nvidia {
class SyncpointManager;

namespace NvCore {
class Container;
class NvMap;
} // namespace NvCore

namespace Devices {
class nvmap;

class nvhost_nvdec_common : public nvdevice {
public:
    explicit nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
                                 SyncpointManager& syncpoint_manager_);
    explicit nvhost_nvdec_common(Core::System& system_, NvCore::Container& core,
                                 NvCore::ChannelType channel_type);
    ~nvhost_nvdec_common() override;

    static void Reset();

protected:
    struct IoctlSetNvmapFD {
        s32_le nvmap_fd{};
@@ -110,11 +116,16 @@ protected:
    NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output);

    std::unordered_map<DeviceFD, u32> fd_to_id{};
    Kernel::KEvent* QueryEvent(u32 event_id) override;

    static std::unordered_map<DeviceFD, u32> fd_to_id;
    u32 channel_syncpoint;
    s32_le nvmap_fd{};
    u32_le submit_timeout{};
    std::shared_ptr<nvmap> nvmap_dev;
    SyncpointManager& syncpoint_manager;
    NvCore::Container& core;
    NvCore::SyncpointManager& syncpoint_manager;
    NvCore::NvMap& nvmap;
    NvCore::ChannelType channel_type;
    std::array<u32, MaxSyncPoints> device_syncpoints{};
};
}; // namespace Devices

@@ -8,9 +8,11 @@
#include "video_core/renderer_base.h"

namespace Service::Nvidia::Devices {
nvhost_vic::nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
                       SyncpointManager& syncpoint_manager_)
    : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {}

u32 nvhost_vic::next_id{};

nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_)
    : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::VIC} {}

nvhost_vic::~nvhost_vic() = default;

@@ -9,8 +9,7 @@ namespace Service::Nvidia::Devices {

class nvhost_vic final : public nvhost_nvdec_common {
public:
    explicit nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
                        SyncpointManager& syncpoint_manager_);
    explicit nvhost_vic(Core::System& system_, NvCore::Container& core);
    ~nvhost_vic();

    NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -24,6 +23,6 @@ public:
    void OnClose(DeviceFD fd) override;

private:
    u32 next_id{};
    static u32 next_id;
};
} // namespace Service::Nvidia::Devices

@@ -2,19 +2,26 @@
// SPDX-License-Identifier: GPL-2.0-or-later

#include <algorithm>
#include <bit>
#include <cstring>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/memory.h"

using Core::Memory::PAGE_SIZE;

namespace Service::Nvidia::Devices {

nvmap::nvmap(Core::System& system_) : nvdevice{system_} {
    // Handle 0 appears to be used when remapping, so we create a placeholder empty nvmap object to
    // represent this.
    CreateObject(0);
}
nvmap::nvmap(Core::System& system_, NvCore::Container& container_)
    : nvdevice{system_}, container{container_}, file{container.GetNvMapFile()} {}

nvmap::~nvmap() = default;

@@ -63,38 +70,31 @@ void nvmap::OnOpen(DeviceFD fd) {}
void nvmap::OnClose(DeviceFD fd) {}

VAddr nvmap::GetObjectAddress(u32 handle) const {
    auto object = GetObject(handle);
    ASSERT(object);
    ASSERT(object->status == Object::Status::Allocated);
    return object->addr;
    auto obj = file.GetHandle(handle);
    if (obj) {
        return obj->address;
    }
    return 0;
}

u32 nvmap::CreateObject(u32 size) {
    // Create a new nvmap object and obtain a handle to it.
    auto object = std::make_shared<Object>();
    object->id = next_id++;
    object->size = size;
    object->status = Object::Status::Created;
    object->refcount = 1;

    const u32 handle = next_handle++;

    handles.insert_or_assign(handle, std::move(object));

    return handle;
std::shared_ptr<NvCore::NvMap::Handle> nvmap::GetObject(u32 handle) const {
    return file.GetHandle(handle);
}
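After the rework the nvmap device no longer owns an object table; it is a thin wrapper over one NvMap "file" shared through the container, so every nvdrv device sees the same handles. A compact sketch of that ownership shape (illustrative types, not NvCore's API):

// Sketch: a container owns the shared handle table; devices keep references
// to it instead of owning private maps.
#include <cstdint>
#include <memory>
#include <unordered_map>

struct HandleInfo {
    std::uint64_t address{};
    std::uint64_t size{};
};

class HandleTable {
public:
    std::shared_ptr<HandleInfo> Get(std::uint32_t handle) const {
        const auto it = entries.find(handle);
        return it != entries.end() ? it->second : nullptr;
    }
    std::unordered_map<std::uint32_t, std::shared_ptr<HandleInfo>> entries;
};

class Container {
public:
    HandleTable& File() {
        return file;
    }

private:
    HandleTable file;
};

class NvmapDevice {
public:
    explicit NvmapDevice(Container& container_) : container{container_}, file{container_.File()} {}
    std::shared_ptr<HandleInfo> GetObject(std::uint32_t handle) const {
        return file.Get(handle);
    }

private:
    Container& container;
    HandleTable& file;
};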
NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) {
    IocCreateParams params;
    std::memcpy(&params, input.data(), sizeof(params));
    LOG_DEBUG(Service_NVDRV, "size=0x{:08X}", params.size);
    LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);

    if (!params.size) {
        LOG_ERROR(Service_NVDRV, "Size is 0");
        return NvResult::BadValue;
    std::shared_ptr<NvCore::NvMap::Handle> handle_description{};
    auto result = file.CreateHandle(Common::AlignUp(params.size, PAGE_SIZE), handle_description);
    if (result != NvResult::Success) {
        LOG_CRITICAL(Service_NVDRV, "Failed to create Object");
        return result;
    }

    params.handle = CreateObject(params.size);
    handle_description->orig_size = params.size; // Orig size is the unaligned size
    params.handle = handle_description->id;
    LOG_DEBUG(Service_NVDRV, "handle: {}, size: 0x{:X}", handle_description->id, params.size);

    std::memcpy(output.data(), &params, sizeof(params));
    return NvResult::Success;
@@ -103,63 +103,68 @@ NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output)
NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) {
    IocAllocParams params;
    std::memcpy(&params, input.data(), sizeof(params));
    LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.addr);
    LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address);

    if (!params.handle) {
        LOG_ERROR(Service_NVDRV, "Handle is 0");
        LOG_CRITICAL(Service_NVDRV, "Handle is 0");
        return NvResult::BadValue;
    }

    if ((params.align - 1) & params.align) {
        LOG_ERROR(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align);
        LOG_CRITICAL(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align);
        return NvResult::BadValue;
    }

    const u32 min_alignment = 0x1000;
    if (params.align < min_alignment) {
        params.align = min_alignment;
    // Force page size alignment at a minimum
    if (params.align < PAGE_SIZE) {
        params.align = PAGE_SIZE;
    }

    auto object = GetObject(params.handle);
    if (!object) {
        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
    auto handle_description{file.GetHandle(params.handle)};
    if (!handle_description) {
        LOG_CRITICAL(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
        return NvResult::BadValue;
    }

    if (object->status == Object::Status::Allocated) {
        LOG_ERROR(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle);
    if (handle_description->allocated) {
        LOG_CRITICAL(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle);
        return NvResult::InsufficientMemory;
    }

    object->flags = params.flags;
    object->align = params.align;
    object->kind = params.kind;
    object->addr = params.addr;
    object->status = Object::Status::Allocated;

    const auto result =
        handle_description->Alloc(params.flags, params.align, params.kind, params.address);
    if (result != NvResult::Success) {
        LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
        return result;
    }
    ASSERT(system.CurrentProcess()
               ->PageTable()
               .LockForDeviceAddressSpace(handle_description->address, handle_description->size)
               .IsSuccess());
    std::memcpy(output.data(), &params, sizeof(params));
    return NvResult::Success;
    return result;
}

NvResult nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output) {
    IocGetIdParams params;
    std::memcpy(&params, input.data(), sizeof(params));

    LOG_WARNING(Service_NVDRV, "called");
    LOG_DEBUG(Service_NVDRV, "called");

    // See the comment in FromId for extra info on this function
    if (!params.handle) {
        LOG_ERROR(Service_NVDRV, "Handle is zero");
        LOG_CRITICAL(Service_NVDRV, "Error!");
        return NvResult::BadValue;
    }

    auto object = GetObject(params.handle);
    if (!object) {
        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
        return NvResult::BadValue;
    auto handle_description{file.GetHandle(params.handle)};
    if (!handle_description) {
        LOG_CRITICAL(Service_NVDRV, "Error!");
        return NvResult::AccessDenied; // This will always return EPERM irrespective of if the
                                       // handle exists or not
    }

    params.id = object->id;

    params.id = handle_description->id;
    std::memcpy(output.data(), &params, sizeof(params));
    return NvResult::Success;
}
@@ -168,26 +173,29 @@ NvResult nvmap::IocFromId(const std::vector<u8>& input, std::vector<u8>& output)
    IocFromIdParams params;
    std::memcpy(&params, input.data(), sizeof(params));

    LOG_WARNING(Service_NVDRV, "(STUBBED) called");
    LOG_DEBUG(Service_NVDRV, "called, id:{}", params.id);

    auto itr = std::find_if(handles.begin(), handles.end(),
                            [&](const auto& entry) { return entry.second->id == params.id; });
    if (itr == handles.end()) {
        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
    // Handles and IDs are always the same value in nvmap however IDs can be used globally given the
    // right permissions.
    // Since we don't plan on ever supporting multiprocess we can skip implementing handle refs and
    // so this function just does simple validation and passes through the handle id.
    if (!params.id) {
        LOG_CRITICAL(Service_NVDRV, "Zero Id is invalid!");
        return NvResult::BadValue;
    }

    auto& object = itr->second;
    if (object->status != Object::Status::Allocated) {
        LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle);
    auto handle_description{file.GetHandle(params.id)};
    if (!handle_description) {
        LOG_CRITICAL(Service_NVDRV, "Unregistered handle!");
        return NvResult::BadValue;
    }

    itr->second->refcount++;

    // Return the existing handle instead of creating a new one.
    params.handle = itr->first;

    auto result = handle_description->Duplicate(false);
    if (result != NvResult::Success) {
        LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!");
        return result;
    }
    params.handle = handle_description->id;
    std::memcpy(output.data(), &params, sizeof(params));
    return NvResult::Success;
}
@@ -198,35 +206,43 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output)
    IocParamParams params;
    std::memcpy(&params, input.data(), sizeof(params));

    LOG_WARNING(Service_NVDRV, "(STUBBED) called type={}", params.param);
    LOG_DEBUG(Service_NVDRV, "called type={}", params.param);

    auto object = GetObject(params.handle);
    if (!object) {
        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
    if (!params.handle) {
        LOG_CRITICAL(Service_NVDRV, "Invalid handle!");
        return NvResult::BadValue;
    }

    if (object->status != Object::Status::Allocated) {
        LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle);
    auto handle_description{file.GetHandle(params.handle)};
    if (!handle_description) {
        LOG_CRITICAL(Service_NVDRV, "Not registered handle!");
        return NvResult::BadValue;
    }

    switch (static_cast<ParamTypes>(params.param)) {
    case ParamTypes::Size:
        params.result = object->size;
    switch (params.param) {
    case HandleParameterType::Size:
        params.result = static_cast<u32_le>(handle_description->orig_size);
        break;
    case ParamTypes::Alignment:
        params.result = object->align;
    case HandleParameterType::Alignment:
        params.result = static_cast<u32_le>(handle_description->align);
        break;
    case ParamTypes::Heap:
        // TODO(Subv): Seems to be a hardcoded value?
        params.result = 0x40000000;
    case HandleParameterType::Base:
        params.result = static_cast<u32_le>(-22); // posix EINVAL
        break;
    case ParamTypes::Kind:
        params.result = object->kind;
    case HandleParameterType::Heap:
        if (handle_description->allocated)
            params.result = 0x40000000;
        else
            params.result = 0;
        break;
    case HandleParameterType::Kind:
        params.result = handle_description->kind;
        break;
    case HandleParameterType::IsSharedMemMapped:
        params.result = handle_description->is_shared_mem_mapped;
        break;
    default:
        UNIMPLEMENTED();
        return NvResult::BadValue;
    }

    std::memcpy(output.data(), &params, sizeof(params));
@@ -234,46 +250,29 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output)
}

NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
    // TODO(Subv): These flags are unconfirmed.
    enum FreeFlags {
        Freed = 0,
        NotFreedYet = 1,
    };

    IocFreeParams params;
    std::memcpy(&params, input.data(), sizeof(params));

    LOG_WARNING(Service_NVDRV, "(STUBBED) called");
    LOG_DEBUG(Service_NVDRV, "called");

    auto itr = handles.find(params.handle);
    if (itr == handles.end()) {
        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
        return NvResult::BadValue;
    }
    if (!itr->second->refcount) {
        LOG_ERROR(
            Service_NVDRV,
            "There are no references to this object. The object is already freed. handle={:08X}",
            params.handle);
        return NvResult::BadValue;
    if (!params.handle) {
        LOG_CRITICAL(Service_NVDRV, "Handle null freed?");
        return NvResult::Success;
    }

    itr->second->refcount--;

    params.size = itr->second->size;

    if (itr->second->refcount == 0) {
        params.flags = Freed;
        // The address of the nvmap is written to the output if we're finally freeing it, otherwise
        // 0 is written.
        params.address = itr->second->addr;
    if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
        ASSERT(system.CurrentProcess()
                   ->PageTable()
                   .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
                   .IsSuccess());
        params.address = freeInfo->address;
        params.size = static_cast<u32>(freeInfo->size);
        params.flags.raw = 0;
        params.flags.map_uncached.Assign(freeInfo->was_uncached);
    } else {
        params.flags = NotFreedYet;
        params.address = 0;
        // This is possible when there are internal dups or other duplicates.
    }

    handles.erase(params.handle);

    std::memcpy(output.data(), &params, sizeof(params));
    return NvResult::Success;
}
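The new free path hinges on FreeHandle reporting mapping information only when the last reference dies; duplicated handles (IocFromId, internal dups) just drop a reference. A sketch of that contract with assumed types:

// Sketch: reference-counted free. Only the final release yields the
// address/size that IocFree writes back; earlier releases return nothing.
#include <cstdint>
#include <optional>
#include <unordered_map>

struct FreeInfo {
    std::uint64_t address;
    std::uint64_t size;
};

class RefTable {
public:
    void Add(std::uint32_t handle, std::uint64_t address, std::uint64_t size) {
        table[handle] = Entry{1, address, size};
    }
    void Duplicate(std::uint32_t handle) {
        ++table.at(handle).refs;
    }
    std::optional<FreeInfo> Free(std::uint32_t handle) {
        const auto it = table.find(handle);
        if (it == table.end() || --it->second.refs > 0) {
            return std::nullopt; // unknown handle, or still referenced by a duplicate
        }
        const FreeInfo info{it->second.address, it->second.size};
        table.erase(it);
        return info;
    }

private:
    struct Entry {
        std::uint32_t refs;
        std::uint64_t address;
        std::uint64_t size;
    };
    std::unordered_map<std::uint32_t, Entry> table;
};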
@@ -9,15 +9,23 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"

namespace Service::Nvidia::NvCore {
class Container;
} // namespace Service::Nvidia::NvCore

namespace Service::Nvidia::Devices {

class nvmap final : public nvdevice {
public:
    explicit nvmap(Core::System& system_);
    explicit nvmap(Core::System& system_, NvCore::Container& container);
    ~nvmap() override;

    nvmap(nvmap const&) = delete;
    nvmap& operator=(nvmap const&) = delete;

    NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                    std::vector<u8>& output) override;
    NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -31,27 +39,16 @@ public:
    /// Returns the allocated address of an nvmap object given its handle.
    VAddr GetObjectAddress(u32 handle) const;

    /// Represents an nvmap object.
    struct Object {
        enum class Status { Created, Allocated };
        u32 id;
        u32 size;
        u32 flags;
        u32 align;
        u8 kind;
        VAddr addr;
        Status status;
        u32 refcount;
        u32 dma_map_addr;
    };
    std::shared_ptr<NvCore::NvMap::Handle> GetObject(u32 handle) const;

    std::shared_ptr<Object> GetObject(u32 handle) const {
        auto itr = handles.find(handle);
        if (itr != handles.end()) {
            return itr->second;
        }
        return {};
    }
    enum class HandleParameterType : u32_le {
        Size = 1,
        Alignment = 2,
        Base = 3,
        Heap = 4,
        Kind = 5,
        IsSharedMemMapped = 6
    };

private:
    /// Id to use for the next handle that is created.
@@ -60,9 +57,6 @@ private:
    /// Id to use for the next object that is created.
    u32 next_id = 0;

    /// Mapping of currently allocated handles to the objects they represent.
    std::unordered_map<u32, std::shared_ptr<Object>> handles;

    struct IocCreateParams {
        // Input
        u32_le size{};
@@ -83,11 +77,11 @@ private:
        // Input
        u32_le handle{};
        u32_le heap_mask{};
        u32_le flags{};
        NvCore::NvMap::Handle::Flags flags{};
        u32_le align{};
        u8 kind{};
        INSERT_PADDING_BYTES(7);
        u64_le addr{};
        u64_le address{};
    };
    static_assert(sizeof(IocAllocParams) == 32, "IocAllocParams has wrong size");

@@ -96,14 +90,14 @@ private:
        INSERT_PADDING_BYTES(4);
        u64_le address{};
        u32_le size{};
        u32_le flags{};
        NvCore::NvMap::Handle::Flags flags{};
    };
    static_assert(sizeof(IocFreeParams) == 24, "IocFreeParams has wrong size");

    struct IocParamParams {
        // Input
        u32_le handle{};
        u32_le param{};
        HandleParameterType param{};
        // Output
        u32_le result{};
    };
@@ -117,14 +111,15 @@ private:
    };
    static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");

    u32 CreateObject(u32 size);

    NvResult IocCreate(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocAlloc(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocGetId(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocFromId(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocParam(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocFree(const std::vector<u8>& input, std::vector<u8>& output);

    NvCore::Container& container;
    NvCore::NvMap& file;
};

} // namespace Service::Nvidia::Devices

@@ -1,5 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#pragma once

@@ -78,11 +80,15 @@ enum class NvResult : u32 {
    ModuleNotPresent = 0xA000E,
};

// obtained from
// https://github.com/skyline-emu/skyline/blob/nvdec-dev/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost/ctrl.h#L47
enum class EventState {
    Free = 0,
    Registered = 1,
    Waiting = 2,
    Busy = 3,
    Available = 0,
    Waiting = 1,
    Cancelling = 2,
    Signalling = 3,
    Signalled = 4,
    Cancelled = 5,
};

union Ioctl {

@@ -1,5 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#include <utility>

@@ -8,6 +10,7 @@
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
@@ -15,17 +18,31 @@
#include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"
#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h"
#include "core/hle/service/nvdrv/devices/nvhost_vic.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/hle/service/nvdrv/nvdrv.h"
#include "core/hle/service/nvdrv/nvdrv_interface.h"
#include "core/hle/service/nvdrv/nvmemp.h"
#include "core/hle/service/nvdrv/syncpoint_manager.h"
#include "core/hle/service/nvflinger/nvflinger.h"
#include "video_core/gpu.h"

namespace Service::Nvidia {

EventInterface::EventInterface(Module& module_) : module{module_}, guard{}, on_signal{} {}

EventInterface::~EventInterface() = default;

Kernel::KEvent* EventInterface::CreateEvent(std::string name) {
    Kernel::KEvent* new_event = module.service_context.CreateEvent(std::move(name));
    return new_event;
}

void EventInterface::FreeEvent(Kernel::KEvent* event) {
    module.service_context.CloseEvent(event);
}

void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger,
                       Core::System& system) {
    auto module_ = std::make_shared<Module>(system);
@@ -38,33 +55,55 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger
}

Module::Module(Core::System& system)
    : syncpoint_manager{system.GPU()}, service_context{system, "nvdrv"} {
    for (u32 i = 0; i < MaxNvEvents; i++) {
        events_interface.events[i].event =
            service_context.CreateEvent(fmt::format("NVDRV::NvEvent_{}", i));
        events_interface.status[i] = EventState::Free;
        events_interface.registered[i] = false;
    }
    auto nvmap_dev = std::make_shared<Devices::nvmap>(system);
    devices["/dev/nvhost-as-gpu"] = std::make_shared<Devices::nvhost_as_gpu>(system, nvmap_dev);
    devices["/dev/nvhost-gpu"] =
        std::make_shared<Devices::nvhost_gpu>(system, nvmap_dev, syncpoint_manager);
    devices["/dev/nvhost-ctrl-gpu"] = std::make_shared<Devices::nvhost_ctrl_gpu>(system);
    devices["/dev/nvmap"] = nvmap_dev;
    devices["/dev/nvdisp_disp0"] = std::make_shared<Devices::nvdisp_disp0>(system, nvmap_dev);
    devices["/dev/nvhost-ctrl"] =
        std::make_shared<Devices::nvhost_ctrl>(system, events_interface, syncpoint_manager);
    devices["/dev/nvhost-nvdec"] =
        std::make_shared<Devices::nvhost_nvdec>(system, nvmap_dev, syncpoint_manager);
    devices["/dev/nvhost-nvjpg"] = std::make_shared<Devices::nvhost_nvjpg>(system);
    devices["/dev/nvhost-vic"] =
        std::make_shared<Devices::nvhost_vic>(system, nvmap_dev, syncpoint_manager);
    : service_context{system, "nvdrv"}, events_interface{*this}, container{system.Host1x()} {
    builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) {
        std::shared_ptr<Devices::nvdevice> device =
            std::make_shared<Devices::nvhost_as_gpu>(system, *this, container);
        return open_files.emplace(fd, device).first;
    };
    builders["/dev/nvhost-gpu"] = [this, &system](DeviceFD fd) {
        std::shared_ptr<Devices::nvdevice> device =
            std::make_shared<Devices::nvhost_gpu>(system, events_interface, container);
        return open_files.emplace(fd, device).first;
    };
    builders["/dev/nvhost-ctrl-gpu"] = [this, &system](DeviceFD fd) {
        std::shared_ptr<Devices::nvdevice> device =
            std::make_shared<Devices::nvhost_ctrl_gpu>(system, events_interface);
        return open_files.emplace(fd, device).first;
    };
    builders["/dev/nvmap"] = [this, &system](DeviceFD fd) {
        std::shared_ptr<Devices::nvdevice> device =
            std::make_shared<Devices::nvmap>(system, container);
        return open_files.emplace(fd, device).first;
    };
    builders["/dev/nvdisp_disp0"] = [this, &system](DeviceFD fd) {
        std::shared_ptr<Devices::nvdevice> device =
            std::make_shared<Devices::nvdisp_disp0>(system, container);
        return open_files.emplace(fd, device).first;
    };
    builders["/dev/nvhost-ctrl"] = [this, &system](DeviceFD fd) {
        std::shared_ptr<Devices::nvdevice> device =
            std::make_shared<Devices::nvhost_ctrl>(system, events_interface, container);
        return open_files.emplace(fd, device).first;
    };
    builders["/dev/nvhost-nvdec"] = [this, &system](DeviceFD fd) {
        std::shared_ptr<Devices::nvdevice> device =
            std::make_shared<Devices::nvhost_nvdec>(system, container);
        return open_files.emplace(fd, device).first;
    };
    builders["/dev/nvhost-nvjpg"] = [this, &system](DeviceFD fd) {
        std::shared_ptr<Devices::nvdevice> device = std::make_shared<Devices::nvhost_nvjpg>(system);
        return open_files.emplace(fd, device).first;
    };
    builders["/dev/nvhost-vic"] = [this, &system](DeviceFD fd) {
        std::shared_ptr<Devices::nvdevice> device =
            std::make_shared<Devices::nvhost_vic>(system, container);
        return open_files.emplace(fd, device).first;
    };
}
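Device construction moves out of the Module constructor: each /dev path now maps to a factory ("builder") that creates a fresh device instance when the node is opened and records it under the new fd. A self-contained sketch of that registry under simplified types:

// Sketch: an open-time factory table keyed by device path, like the
// 'builders' map filled in above. Types are deliberately simplified.
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

struct Device {
    virtual ~Device() = default;
};
struct NvmapDev : Device {};

using Fd = int;
using FileMap = std::unordered_map<Fd, std::shared_ptr<Device>>;

class Registry {
public:
    Registry() {
        builders["/dev/nvmap"] = [this](Fd fd) {
            return open_files.emplace(fd, std::make_shared<NvmapDev>()).first;
        };
    }

    Fd Open(const std::string& path) {
        const auto it = builders.find(path);
        if (it == builders.end()) {
            return -1; // unknown device node
        }
        const Fd fd = next_fd++;
        it->second(fd); // the device is constructed only now, on open
        return fd;
    }

private:
    Fd next_fd = 1;
    FileMap open_files;
    std::unordered_map<std::string, std::function<FileMap::iterator(Fd)>> builders;
};

int main() {
    Registry registry;
    std::cout << registry.Open("/dev/nvmap") << '\n';   // 1: first open succeeds
    std::cout << registry.Open("/dev/unknown") << '\n'; // -1: no builder registered
}

One consequence worth noting: two opens of the same path now yield two distinct device instances, which is why shared state in the nvhost devices above became static.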
Module::~Module() {
    for (u32 i = 0; i < MaxNvEvents; i++) {
        service_context.CloseEvent(events_interface.events[i].event);
    }
    Devices::nvhost_nvdec_common::Reset();
}

NvResult Module::VerifyFD(DeviceFD fd) const {
@@ -82,18 +121,18 @@ NvResult Module::VerifyFD(DeviceFD fd) const {
}

DeviceFD Module::Open(const std::string& device_name) {
    if (devices.find(device_name) == devices.end()) {
    auto it = builders.find(device_name);
    if (it == builders.end()) {
        LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name);
        return INVALID_NVDRV_FD;
    }

    auto device = devices[device_name];
    const DeviceFD fd = next_fd++;
    auto& builder = it->second;
    auto device = builder(fd)->second;

    device->OnOpen(fd);

    open_files[fd] = std::move(device);

    return fd;
}

@@ -168,22 +207,24 @@ NvResult Module::Close(DeviceFD fd) {
    return NvResult::Success;
}

void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) {
    for (u32 i = 0; i < MaxNvEvents; i++) {
        if (events_interface.assigned_syncpt[i] == syncpoint_id &&
            events_interface.assigned_value[i] == value) {
            events_interface.LiberateEvent(i);
            events_interface.events[i].event->GetWritableEvent().Signal();
        }
NvResult Module::QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event) {
    if (fd < 0) {
        LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd);
        return NvResult::InvalidState;
    }
}

Kernel::KReadableEvent& Module::GetEvent(const u32 event_id) {
    return events_interface.events[event_id].event->GetReadableEvent();
}
    const auto itr = open_files.find(fd);

Kernel::KWritableEvent& Module::GetEventWriteable(const u32 event_id) {
    return events_interface.events[event_id].event->GetWritableEvent();
    if (itr == open_files.end()) {
        LOG_ERROR(Service_NVDRV, "Could not find DeviceFD={}!", fd);
        return NvResult::NotImplemented;
    }

    event = itr->second->QueryEvent(event_id);
    if (!event) {
        return NvResult::BadParameter;
    }
    return NvResult::Success;
}
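Event lookup is now fully delegated: the module validates the descriptor and asks the device behind it for the event, mapping failures onto NvResult codes. A condensed sketch of that dispatch (error-code mapping as in the function above; types simplified):

// Sketch: fd -> device -> event dispatch with the same error mapping.
#include <memory>
#include <unordered_map>

enum class NvResult { Success, InvalidState, NotImplemented, BadParameter };
struct Event {};

struct Device {
    virtual ~Device() = default;
    virtual Event* QueryEvent(unsigned /*event_id*/) {
        return nullptr; // default: the device exposes no events
    }
};

NvResult QueryEvent(const std::unordered_map<int, std::shared_ptr<Device>>& open_files, int fd,
                    unsigned event_id, Event*& out) {
    if (fd < 0) {
        return NvResult::InvalidState; // bad descriptor
    }
    const auto it = open_files.find(fd);
    if (it == open_files.end()) {
        return NvResult::NotImplemented; // descriptor not open
    }
    out = it->second->QueryEvent(event_id);
    return out != nullptr ? NvResult::Success : NvResult::BadParameter;
}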
} // namespace Service::Nvidia

@@ -1,16 +1,21 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#pragma once

#include <functional>
#include <list>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "common/common_types.h"
#include "core/hle/service/kernel_helpers.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/nvdata.h"
#include "core/hle/service/nvdrv/syncpoint_manager.h"
#include "core/hle/service/nvflinger/ui/fence.h"
#include "core/hle/service/service.h"

@@ -28,81 +33,31 @@ class NVFlinger;

namespace Service::Nvidia {

namespace NvCore {
class Container;
class SyncpointManager;
} // namespace NvCore

namespace Devices {
class nvdevice;
}
class nvhost_ctrl;
} // namespace Devices

/// Represents an Nvidia event
struct NvEvent {
    Kernel::KEvent* event{};
    NvFence fence{};
};
class Module;

struct EventInterface {
    // Mask representing currently busy events
    u64 events_mask{};
    // Each kernel event associated to an NV event
    std::array<NvEvent, MaxNvEvents> events;
    // The status of the current NVEvent
    std::array<EventState, MaxNvEvents> status{};
    // Tells if an NVEvent is registered or not
    std::array<bool, MaxNvEvents> registered{};
    // Tells the NVEvent that it has failed.
    std::array<bool, MaxNvEvents> failed{};
    // When an NVEvent is waiting on GPU interrupt, this is the sync_point
    // associated with it.
    std::array<u32, MaxNvEvents> assigned_syncpt{};
    // This is the value of the GPU interrupt for which the NVEvent is waiting.
    std::array<u32, MaxNvEvents> assigned_value{};
    // Constant to denote an unassigned syncpoint.
    static constexpr u32 unassigned_syncpt = 0xFFFFFFFF;
    std::optional<u32> GetFreeEvent() const {
        u64 mask = events_mask;
        for (u32 i = 0; i < MaxNvEvents; i++) {
            const bool is_free = (mask & 0x1) == 0;
            if (is_free) {
                if (status[i] == EventState::Registered || status[i] == EventState::Free) {
                    return {i};
                }
            }
            mask = mask >> 1;
        }
        return std::nullopt;
    }
    void SetEventStatus(const u32 event_id, EventState new_status) {
        EventState old_status = status[event_id];
        if (old_status == new_status) {
            return;
        }
        status[event_id] = new_status;
        if (new_status == EventState::Registered) {
            registered[event_id] = true;
        }
        if (new_status == EventState::Waiting || new_status == EventState::Busy) {
            events_mask |= (1ULL << event_id);
        }
    }
    void RegisterEvent(const u32 event_id) {
        registered[event_id] = true;
        if (status[event_id] == EventState::Free) {
            status[event_id] = EventState::Registered;
        }
    }
    void UnregisterEvent(const u32 event_id) {
        registered[event_id] = false;
        if (status[event_id] == EventState::Registered) {
            status[event_id] = EventState::Free;
        }
    }
    void LiberateEvent(const u32 event_id) {
        status[event_id] = registered[event_id] ? EventState::Registered : EventState::Free;
        events_mask &= ~(1ULL << event_id);
        assigned_syncpt[event_id] = unassigned_syncpt;
        assigned_value[event_id] = 0;
    }
class EventInterface {
public:
    EventInterface(Module& module_);
    ~EventInterface();

    Kernel::KEvent* CreateEvent(std::string name);

    void FreeEvent(Kernel::KEvent* event);

private:
    Module& module;
    std::mutex guard;
    std::list<Devices::nvhost_ctrl*> on_signal;
};

class Module final {
@@ -112,9 +67,9 @@ public:

    /// Returns a pointer to one of the available devices, identified by its name.
    template <typename T>
    std::shared_ptr<T> GetDevice(const std::string& name) {
        auto itr = devices.find(name);
        if (itr == devices.end())
    std::shared_ptr<T> GetDevice(DeviceFD fd) {
        auto itr = open_files.find(fd);
        if (itr == open_files.end())
            return nullptr;
        return std::static_pointer_cast<T>(itr->second);
    }
@@ -137,28 +92,28 @@ public:
    /// Closes a device file descriptor and returns operation success.
    NvResult Close(DeviceFD fd);

    void SignalSyncpt(const u32 syncpoint_id, const u32 value);

    Kernel::KReadableEvent& GetEvent(u32 event_id);

    Kernel::KWritableEvent& GetEventWriteable(u32 event_id);
    NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event);

private:
    /// Manages syncpoints on the host
    SyncpointManager syncpoint_manager;
    friend class EventInterface;

    /// Id to use for the next open file descriptor.
    DeviceFD next_fd = 1;

    using FilesContainerType = std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>>;
    /// Mapping of file descriptors to the devices they reference.
    std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>> open_files;
    FilesContainerType open_files;

    /// Mapping of device node names to their implementation.
    std::unordered_map<std::string, std::shared_ptr<Devices::nvdevice>> devices;
    KernelHelpers::ServiceContext service_context;

    EventInterface events_interface;

    KernelHelpers::ServiceContext service_context;
    /// Manages syncpoints on the host
    NvCore::Container container;

    void CreateEvent(u32 event_id);
    void FreeEvent(u32 event_id);
    std::unordered_map<std::string, std::function<FilesContainerType::iterator(DeviceFD)>> builders;
};

/// Registers all NVDRV services with the specified service manager.

@@ -1,10 +1,13 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#include <cinttypes>
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/service/nvdrv/nvdata.h"
#include "core/hle/service/nvdrv/nvdrv.h"
@@ -12,10 +15,6 @@

namespace Service::Nvidia {

void NVDRV::SignalGPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) {
    nvdrv->SignalSyncpt(syncpoint_id, value);
}

void NVDRV::Open(Kernel::HLERequestContext& ctx) {
    LOG_DEBUG(Service_NVDRV, "called");
    IPC::ResponseBuilder rb{ctx, 4};
@@ -164,8 +163,7 @@ void NVDRV::Initialize(Kernel::HLERequestContext& ctx) {
void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};
    const auto fd = rp.Pop<DeviceFD>();
    const auto event_id = rp.Pop<u32>() & 0x00FF;
    LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}, event_id={:X}", fd, event_id);
    const auto event_id = rp.Pop<u32>();

    if (!is_initialized) {
        ServiceError(ctx, NvResult::NotInitialized);
@@ -173,24 +171,20 @@ void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) {
        return;
    }

    const auto nv_result = nvdrv->VerifyFD(fd);
    if (nv_result != NvResult::Success) {
        LOG_ERROR(Service_NVDRV, "Invalid FD specified DeviceFD={}!", fd);
        ServiceError(ctx, nv_result);
        return;
    }
    Kernel::KEvent* event = nullptr;
    NvResult result = nvdrv->QueryEvent(fd, event_id, event);

    if (event_id < MaxNvEvents) {
    if (result == NvResult::Success) {
        IPC::ResponseBuilder rb{ctx, 3, 1};
        rb.Push(ResultSuccess);
        auto& event = nvdrv->GetEvent(event_id);
        event.Clear();
        rb.PushCopyObjects(event);
        auto& readable_event = event->GetReadableEvent();
        rb.PushCopyObjects(readable_event);
        rb.PushEnum(NvResult::Success);
    } else {
        LOG_ERROR(Service_NVDRV, "Invalid event request!");
        IPC::ResponseBuilder rb{ctx, 3};
        rb.Push(ResultSuccess);
        rb.PushEnum(NvResult::BadParameter);
        rb.PushEnum(result);
    }
}

@@ -18,8 +18,6 @@ public:
    explicit NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char* name);
    ~NVDRV() override;

    void SignalGPUInterruptSyncpt(u32 syncpoint_id, u32 value);

private:
    void Open(Kernel::HLERequestContext& ctx);
    void Ioctl1(Kernel::HLERequestContext& ctx);

@@ -23,6 +23,8 @@
#include "core/hle/service/vi/display/vi_display.h"
#include "core/hle/service/vi/layer/vi_layer.h"
#include "video_core/gpu.h"
#include "video_core/host1x/host1x.h"
#include "video_core/host1x/syncpoint_manager.h"

namespace Service::NVFlinger {

@@ -95,10 +97,15 @@ NVFlinger::~NVFlinger() {
            display.GetLayer(layer).Core().NotifyShutdown();
        }
    }

    if (nvdrv) {
        nvdrv->Close(disp_fd);
    }
}

void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
    nvdrv = std::move(instance);
    disp_fd = nvdrv->Open("/dev/nvdisp_disp0");
}

std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) {
@@ -252,30 +259,24 @@ void NVFlinger::Compose() {
        return; // We are likely shutting down
    }

    auto& gpu = system.GPU();
    const auto& multi_fence = buffer.fence;
    guard->unlock();
    for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) {
        const auto& fence = multi_fence.fences[fence_id];
        gpu.WaitFence(fence.id, fence.value);
    }
    guard->lock();

    MicroProfileFlip();

    // Now send the buffer to the GPU for drawing.
    // TODO(Subv): Support more than just disp0. The display device selection is probably based
    // on which display we're drawing (Default, Internal, External, etc)
    auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>("/dev/nvdisp_disp0");
    auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>(disp_fd);
    ASSERT(nvdisp);

    guard->unlock();
    Common::Rectangle<int> crop_rect{
        static_cast<int>(buffer.crop.Left()), static_cast<int>(buffer.crop.Top()),
        static_cast<int>(buffer.crop.Right()), static_cast<int>(buffer.crop.Bottom())};

    nvdisp->flip(igbp_buffer.BufferId(), igbp_buffer.Offset(), igbp_buffer.ExternalFormat(),
                 igbp_buffer.Width(), igbp_buffer.Height(), igbp_buffer.Stride(),
                 static_cast<android::BufferTransformFlags>(buffer.transform), crop_rect);
                 static_cast<android::BufferTransformFlags>(buffer.transform), crop_rect,
                 buffer.fence.fences, buffer.fence.num_fences);

    MicroProfileFlip();
    guard->lock();

    swap_interval = buffer.swap_interval;
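Compose no longer stalls on the buffer's fences before presenting; the fences travel with the flip call so the wait can happen downstream. A small sketch of that hand-off under assumed semantics (the consumer of the flip, not the compositor, performs the wait):

// Sketch: queueing a flip together with its fences instead of blocking the
// compose thread on WaitFence first.
#include <cstdint>
#include <vector>

struct Fence {
    std::uint32_t id;
    std::uint32_t value;
};

struct FlipRequest {
    std::uint64_t buffer_id;
    std::vector<Fence> fences; // to be waited on by whoever consumes the flip
};

void Compose(std::vector<FlipRequest>& queue, std::uint64_t buffer_id,
             const std::vector<Fence>& fences) {
    // Before: for (const auto& f : fences) WaitFence(f.id, f.value); right here.
    queue.push_back(FlipRequest{buffer_id, fences}); // now the wait is deferred
}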
@@ -114,6 +114,7 @@ private:
    void SplitVSync(std::stop_token stop_token);

    std::shared_ptr<Nvidia::Module> nvdrv;
    s32 disp_fd;

    std::list<VI::Display> displays;

@@ -62,6 +62,7 @@ static_assert(sizeof(DisplayInfo) == 0x60, "DisplayInfo has wrong size");
class NativeWindow final {
public:
    constexpr explicit NativeWindow(u32 id_) : id{id_} {}
    constexpr explicit NativeWindow(const NativeWindow& other) = default;

private:
    const u32 magic = 2;

@@ -477,6 +477,11 @@ struct Memory::Impl {
            []() {});
    }

    [[nodiscard]] u8* GetPointerSilent(const VAddr vaddr) const {
        return GetPointerImpl(
            vaddr, []() {}, []() {});
    }

    /**
     * Reads a particular data type out of memory at the given virtual address.
     *
@@ -611,6 +616,10 @@ u8* Memory::GetPointer(VAddr vaddr) {
    return impl->GetPointer(vaddr);
}

u8* Memory::GetPointerSilent(VAddr vaddr) {
    return impl->GetPointerSilent(vaddr);
}

const u8* Memory::GetPointer(VAddr vaddr) const {
    return impl->GetPointer(vaddr);
}
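GetPointerSilent reuses the same lookup as GetPointer but passes no-op callbacks, so probing an unmapped address produces no log output. A sketch of the pattern with a single assumed callback:

// Sketch: one lookup routine, two fronts. The loud one logs on a miss, the
// silent one does not; the callback-based impl is an assumption of this sketch.
#include <cstdint>
#include <functional>
#include <iostream>
#include <unordered_map>

class Memory {
public:
    std::uint8_t* GetPointer(std::uint64_t vaddr) {
        return GetPointerImpl(vaddr, [vaddr] {
            std::cerr << "unmapped access at 0x" << std::hex << vaddr << '\n';
        });
    }
    std::uint8_t* GetPointerSilent(std::uint64_t vaddr) {
        return GetPointerImpl(vaddr, [] {}); // same lookup, quiet on a miss
    }

private:
    std::uint8_t* GetPointerImpl(std::uint64_t vaddr, const std::function<void()>& on_unmapped) {
        const auto it = pages.find(vaddr >> 12);
        if (it == pages.end()) {
            on_unmapped();
            return nullptr;
        }
        return it->second + (vaddr & 0xFFF);
    }
    std::unordered_map<std::uint64_t, std::uint8_t*> pages; // page index -> host base pointer
};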
@@ -115,6 +115,7 @@ public:
     * If the address is not valid, nullptr will be returned.
     */
    u8* GetPointer(VAddr vaddr);
    u8* GetPointerSilent(VAddr vaddr);

    template <typename T>
    T* GetPointer(VAddr vaddr) {