early-access version 1255
63  src/core/hle/service/nvdrv/devices/nvdevice.h  Executable file
@@ -0,0 +1,63 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <vector>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/nvdata.h"
#include "core/hle/service/service.h"

namespace Core {
class System;
}

namespace Service::Nvidia::Devices {

/// Represents an abstract nvidia device node. It is to be subclassed by concrete device nodes to
/// implement the ioctl interface.
class nvdevice {
public:
    explicit nvdevice(Core::System& system) : system{system} {}
    virtual ~nvdevice() = default;

    /**
     * Handles an ioctl1 request.
     * @param command The ioctl command id.
     * @param input A buffer containing the input data for the ioctl.
     * @param output A buffer where the output data will be written to.
     * @returns The result code of the ioctl.
     */
    virtual NvResult Ioctl1(Ioctl command, const std::vector<u8>& input,
                            std::vector<u8>& output) = 0;

    /**
     * Handles an ioctl2 request.
     * @param command The ioctl command id.
     * @param input A buffer containing the input data for the ioctl.
     * @param inline_input A buffer containing the input data for the ioctl which has been inlined.
     * @param output A buffer where the output data will be written to.
     * @returns The result code of the ioctl.
     */
    virtual NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
                            const std::vector<u8>& inline_input, std::vector<u8>& output) = 0;

    /**
     * Handles an ioctl3 request.
     * @param command The ioctl command id.
     * @param input A buffer containing the input data for the ioctl.
     * @param output A buffer where the output data will be written to.
     * @param inline_output A buffer where the inlined output data will be written to.
     * @returns The result code of the ioctl.
     */
    virtual NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                            std::vector<u8>& inline_output) = 0;

protected:
    Core::System& system;
};

} // namespace Service::Nvidia::Devices
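To illustrate how this interface is meant to be consumed, here is a minimal, hypothetical subclass. The device name and command numbers below are invented for illustration and are not part of this commit; it only mirrors the dispatch pattern used by the concrete devices further down.

// Hypothetical example: a stub device answering a single ioctl1 command.
// Needs <cstring> and <algorithm> in addition to nvdevice.h.
class nv_stub_device final : public nvdevice {
public:
    explicit nv_stub_device(Core::System& system) : nvdevice(system) {}
    ~nv_stub_device() override = default;

    NvResult Ioctl1(Ioctl command, const std::vector<u8>& input,
                    std::vector<u8>& output) override {
        // Dispatch on the decoded command fields, as the real devices do.
        if (command.group == 'S' && command.cmd == 0x1) {
            // Echo the input back, truncated to the caller-sized output buffer.
            std::memcpy(output.data(), input.data(), std::min(input.size(), output.size()));
            return NvResult::Success;
        }
        return NvResult::NotImplemented;
    }

    NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
                    const std::vector<u8>& inline_input, std::vector<u8>& output) override {
        return NvResult::NotImplemented;
    }

    NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                    std::vector<u8>& inline_output) override {
        return NvResult::NotImplemented;
    }
};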
59  src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp  Executable file
@@ -0,0 +1,59 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/perf_stats.h"
#include "video_core/gpu.h"
#include "video_core/renderer_base.h"

namespace Service::Nvidia::Devices {

nvdisp_disp0::nvdisp_disp0(Core::System& system, std::shared_ptr<nvmap> nvmap_dev)
    : nvdevice(system), nvmap_dev(std::move(nvmap_dev)) {}
nvdisp_disp0::~nvdisp_disp0() = default;

NvResult nvdisp_disp0::Ioctl1(Ioctl command, const std::vector<u8>& input,
                              std::vector<u8>& output) {
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvdisp_disp0::Ioctl2(Ioctl command, const std::vector<u8>& input,
                              const std::vector<u8>& inline_input, std::vector<u8>& output) {
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvdisp_disp0::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                              std::vector<u8>& inline_output) {
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height,
                        u32 stride, NVFlinger::BufferQueue::BufferTransformFlags transform,
                        const Common::Rectangle<int>& crop_rect) {
    VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle);
    LOG_TRACE(Service,
              "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}",
              addr, offset, width, height, stride, format);

    using PixelFormat = Tegra::FramebufferConfig::PixelFormat;
    const Tegra::FramebufferConfig framebuffer{
        addr, offset, width, height, stride, static_cast<PixelFormat>(format),
        transform, crop_rect};

    system.GetPerfStats().EndGameFrame();
    system.GetPerfStats().EndSystemFrame();
    system.GPU().SwapBuffers(&framebuffer);
    system.FrameLimiter().DoFrameLimiting(system.CoreTiming().GetGlobalTimeUs());
    system.GetPerfStats().BeginSystemFrame();
}

} // namespace Service::Nvidia::Devices
38  src/core/hle/service/nvdrv/devices/nvdisp_disp0.h  Executable file
@@ -0,0 +1,38 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <vector>
#include "common/common_types.h"
#include "common/math_util.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
#include "core/hle/service/nvflinger/buffer_queue.h"

namespace Service::Nvidia::Devices {

class nvmap;

class nvdisp_disp0 final : public nvdevice {
public:
    explicit nvdisp_disp0(Core::System& system, std::shared_ptr<nvmap> nvmap_dev);
    ~nvdisp_disp0() override;

    NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
    NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
                    const std::vector<u8>& inline_input, std::vector<u8>& output) override;
    NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                    std::vector<u8>& inline_output) override;

    /// Performs a screen flip, drawing the buffer pointed to by the handle.
    void flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height, u32 stride,
              NVFlinger::BufferQueue::BufferTransformFlags transform,
              const Common::Rectangle<int>& crop_rect);

private:
    std::shared_ptr<nvmap> nvmap_dev;
};

} // namespace Service::Nvidia::Devices
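The dispatch code in the devices that follow switches on command.group (a magic byte such as 'A', 'G', or 'H') and command.cmd, and logs command.raw when a request is unknown. Those fields come from the Ioctl union in core/hle/service/nvdrv/nvdata.h, which is not part of this excerpt; the sketch below shows the conventional Linux _IOC-style packing that such a union typically mirrors. The field widths here are an assumption for illustration, not taken from this commit.

// Assumed layout, illustration only: the usual _IOC packing of an ioctl number.
// bits  0..7   command number within the group         -> cmd
// bits  8..15  "magic"/group byte, e.g. 'A' for as_gpu -> group
// bits 16..29  size of the parameter structure
// bits 30..31  transfer direction
union ExampleIoctl {
    u32 raw;
    struct {
        u32 cmd : 8;
        u32 group : 8;
        u32 length : 14;
        u32 direction : 2;
    };
};
// Example: under this assumed layout, raw = 0x40044101 decodes as group 'A' (0x41),
// cmd 0x01, a 4-byte parameter block, and a read direction.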
349  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp  Executable file
@@ -0,0 +1,349 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <cstring>
#include <utility>

#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/memory.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_base.h"

namespace Service::Nvidia::Devices {

nvhost_as_gpu::nvhost_as_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev)
    : nvdevice(system), nvmap_dev(std::move(nvmap_dev)) {}
nvhost_as_gpu::~nvhost_as_gpu() = default;

NvResult nvhost_as_gpu::Ioctl1(Ioctl command, const std::vector<u8>& input,
                               std::vector<u8>& output) {
    switch (command.group) {
    case 'A':
        switch (command.cmd) {
        case 0x1:
            return BindChannel(input, output);
        case 0x2:
            return AllocateSpace(input, output);
        case 0x3:
            return FreeSpace(input, output);
        case 0x5:
            return UnmapBuffer(input, output);
        case 0x6:
            return MapBufferEx(input, output);
        case 0x8:
            return GetVARegions(input, output);
        case 0x9:
            return InitalizeEx(input, output);
        case 0x14:
            return Remap(input, output);
        default:
            break;
        }
        break;
    default:
        break;
    }

    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_as_gpu::Ioctl2(Ioctl command, const std::vector<u8>& input,
                               const std::vector<u8>& inline_input, std::vector<u8>& output) {
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_as_gpu::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                               std::vector<u8>& inline_output) {
    switch (command.group) {
    case 'A':
        switch (command.cmd) {
        case 0x8:
            return GetVARegions(input, output, inline_output);
        default:
            break;
        }
        break;
    default:
        break;
    }
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_as_gpu::InitalizeEx(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlInitalizeEx params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_WARNING(Service_NVDRV, "(STUBBED) called, big_page_size=0x{:X}", params.big_page_size);

    return NvResult::Success;
}

NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlAllocSpace params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages,
              params.page_size, params.flags);

    const auto size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
    if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) {
        params.offset = *system.GPU().MemoryManager().AllocateFixed(params.offset, size);
    } else {
        params.offset = system.GPU().MemoryManager().Allocate(size, params.align);
    }

    auto result = NvResult::Success;
    if (!params.offset) {
        LOG_CRITICAL(Service_NVDRV, "allocation failed for size {}", size);
        result = NvResult::InsufficientMemory;
    }

    std::memcpy(output.data(), &params, output.size());
    return result;
}

NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlFreeSpace params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset,
              params.pages, params.page_size);

    system.GPU().MemoryManager().Unmap(params.offset,
                                       static_cast<std::size_t>(params.pages) * params.page_size);

    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output) {
    const auto num_entries = input.size() / sizeof(IoctlRemapEntry);

    LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries);

    auto result = NvResult::Success;
    std::vector<IoctlRemapEntry> entries(num_entries);
    std::memcpy(entries.data(), input.data(), input.size());

    for (const auto& entry : entries) {
        LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}",
                  entry.offset, entry.nvmap_handle, entry.pages);

        const auto object{nvmap_dev->GetObject(entry.nvmap_handle)};
        if (!object) {
            LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle);
            result = NvResult::InvalidState;
            break;
        }

        const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10};
        const auto size{static_cast<u64>(entry.pages) << 0x10};
        const auto map_offset{static_cast<u64>(entry.map_offset) << 0x10};
        const auto addr{system.GPU().MemoryManager().Map(object->addr + map_offset, offset, size)};

        if (!addr) {
            LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!");
            result = NvResult::InvalidState;
            break;
        }
    }

    std::memcpy(output.data(), entries.data(), output.size());
    return result;
}

NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlMapBufferEx params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_DEBUG(Service_NVDRV,
              "called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}"
              ", offset={}",
              params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size,
              params.offset);

    const auto object{nvmap_dev->GetObject(params.nvmap_handle)};
    if (!object) {
        LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle);
        std::memcpy(output.data(), &params, output.size());
        return NvResult::InvalidState;
    }

    // The real nvservices doesn't make a distinction between handles and ids: an object can
    // only have one handle, and it will be the same as its id. Assert that this is the case
    // to prevent unexpected behavior.
    ASSERT(object->id == params.nvmap_handle);
    auto& gpu = system.GPU();

    u64 page_size{params.page_size};
    if (!page_size) {
        page_size = object->align;
    }

    if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) {
        if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) {
            const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)};
            const auto gpu_addr{static_cast<GPUVAddr>(params.offset + params.buffer_offset)};

            if (!gpu.MemoryManager().Map(cpu_addr, gpu_addr, params.mapping_size)) {
                LOG_CRITICAL(Service_NVDRV,
                             "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, "
                             "mapping_size = {}, offset={}",
                             params.flags, params.nvmap_handle, params.buffer_offset,
                             params.mapping_size, params.offset);

                std::memcpy(output.data(), &params, output.size());
                return NvResult::InvalidState;
            }

            std::memcpy(output.data(), &params, output.size());
            return NvResult::Success;
        } else {
            LOG_CRITICAL(Service_NVDRV, "address not mapped offset={}", params.offset);

            std::memcpy(output.data(), &params, output.size());
            return NvResult::InvalidState;
        }
    }

    // We can only map objects that have already been assigned a CPU address.
    ASSERT(object->status == nvmap::Object::Status::Allocated);

    const auto physical_address{object->addr + params.buffer_offset};
    u64 size{params.mapping_size};
    if (!size) {
        size = object->size;
    }

    const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None};
    if (is_alloc) {
        params.offset = gpu.MemoryManager().MapAllocate(physical_address, size, page_size);
    } else {
        params.offset = gpu.MemoryManager().Map(physical_address, params.offset, size);
    }

    auto result = NvResult::Success;
    if (!params.offset) {
        LOG_CRITICAL(Service_NVDRV, "failed to map size={}", size);
        result = NvResult::InvalidState;
    } else {
        AddBufferMap(params.offset, size, physical_address, is_alloc);
    }

    std::memcpy(output.data(), &params, output.size());
    return result;
}

NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlUnmapBuffer params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);

    if (const auto size{RemoveBufferMap(params.offset)}; size) {
        system.GPU().MemoryManager().Unmap(params.offset, *size);
    } else {
        LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset);
    }

    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlBindChannel params{};
    std::memcpy(&params, input.data(), input.size());
    LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}", params.fd);

    channel = params.fd;
    return NvResult::Success;
}

NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlGetVaRegions params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
                params.buf_size);

    params.buf_size = 0x30;
    params.regions[0].offset = 0x04000000;
    params.regions[0].page_size = 0x1000;
    params.regions[0].pages = 0x3fbfff;

    params.regions[1].offset = 0x04000000;
    params.regions[1].page_size = 0x10000;
    params.regions[1].pages = 0x1bffff;

    // TODO(ogniK): This can probably stay stubbed, but proper support should be added eventually.

    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output,
                                     std::vector<u8>& inline_output) {
    IoctlGetVaRegions params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
                params.buf_size);

    params.buf_size = 0x30;
    params.regions[0].offset = 0x04000000;
    params.regions[0].page_size = 0x1000;
    params.regions[0].pages = 0x3fbfff;

    params.regions[1].offset = 0x04000000;
    params.regions[1].page_size = 0x10000;
    params.regions[1].pages = 0x1bffff;

    // TODO(ogniK): This can probably stay stubbed, but proper support should be added eventually.

    std::memcpy(output.data(), &params, output.size());
    std::memcpy(inline_output.data(), &params.regions, inline_output.size());
    return NvResult::Success;
}

std::optional<nvhost_as_gpu::BufferMap> nvhost_as_gpu::FindBufferMap(GPUVAddr gpu_addr) const {
    const auto end{buffer_mappings.upper_bound(gpu_addr)};
    for (auto iter{buffer_mappings.begin()}; iter != end; ++iter) {
        if (gpu_addr >= iter->second.StartAddr() && gpu_addr < iter->second.EndAddr()) {
            return iter->second;
        }
    }

    return std::nullopt;
}

void nvhost_as_gpu::AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr,
                                 bool is_allocated) {
    buffer_mappings[gpu_addr] = {gpu_addr, size, cpu_addr, is_allocated};
}

std::optional<std::size_t> nvhost_as_gpu::RemoveBufferMap(GPUVAddr gpu_addr) {
    if (const auto iter{buffer_mappings.find(gpu_addr)}; iter != buffer_mappings.end()) {
        std::size_t size{};

        if (iter->second.IsAllocated()) {
            size = iter->second.Size();
        }

        buffer_mappings.erase(iter);

        return size;
    }

    return std::nullopt;
}

} // namespace Service::Nvidia::Devices
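A note on the shifts in Remap above: entry.offset, entry.pages, and entry.map_offset arrive in units of 64 KiB big pages, so each value is shifted left by 0x10 (16 bits) to convert it to bytes. The numbers below are hypothetical and only illustrate the arithmetic.

// Worked example for one IoctlRemapEntry (hypothetical values):
//   entry.offset     = 0x100 -> GPU VA      = 0x100 << 16 = 0x0100'0000
//   entry.pages      = 0x10  -> mapping size = 0x10 << 16 = 0x0010'0000 (1 MiB)
//   entry.map_offset = 0x2   -> byte offset into the nvmap object = 0x2 << 16 = 0x0002'0000
constexpr u64 kBigPageShift = 0x10;
static_assert((u64{0x10} << kBigPageShift) == 0x100000, "0x10 big pages is exactly 1 MiB");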
181  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h  Executable file
@@ -0,0 +1,181 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <map>
#include <memory>
#include <optional>
#include <vector>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"

namespace Service::Nvidia::Devices {

class nvmap;

enum class AddressSpaceFlags : u32 {
    None = 0x0,
    FixedOffset = 0x1,
    Remap = 0x100,
};
DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags);

class nvhost_as_gpu final : public nvdevice {
public:
    explicit nvhost_as_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev);
    ~nvhost_as_gpu() override;

    NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
    NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
                    const std::vector<u8>& inline_input, std::vector<u8>& output) override;
    NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                    std::vector<u8>& inline_output) override;

private:
    class BufferMap final {
    public:
        constexpr BufferMap() = default;

        constexpr BufferMap(GPUVAddr start_addr, std::size_t size)
            : start_addr{start_addr}, end_addr{start_addr + size} {}

        constexpr BufferMap(GPUVAddr start_addr, std::size_t size, VAddr cpu_addr,
                            bool is_allocated)
            : start_addr{start_addr}, end_addr{start_addr + size}, cpu_addr{cpu_addr},
              is_allocated{is_allocated} {}

        constexpr VAddr StartAddr() const {
            return start_addr;
        }

        constexpr VAddr EndAddr() const {
            return end_addr;
        }

        constexpr std::size_t Size() const {
            return end_addr - start_addr;
        }

        constexpr VAddr CpuAddr() const {
            return cpu_addr;
        }

        constexpr bool IsAllocated() const {
            return is_allocated;
        }

    private:
        GPUVAddr start_addr{};
        GPUVAddr end_addr{};
        VAddr cpu_addr{};
        bool is_allocated{};
    };

    struct IoctlInitalizeEx {
        u32_le big_page_size{}; // depends on GPU's available_big_page_sizes; 0=default
        s32_le as_fd{};         // ignored; passes 0
        u32_le flags{};         // passes 0
        u32_le reserved{};      // ignored; passes 0
        u64_le unk0{};
        u64_le unk1{};
        u64_le unk2{};
    };
    static_assert(sizeof(IoctlInitalizeEx) == 40, "IoctlInitalizeEx is incorrect size");

    struct IoctlAllocSpace {
        u32_le pages{};
        u32_le page_size{};
        AddressSpaceFlags flags{};
        INSERT_PADDING_WORDS(1);
        union {
            u64_le offset;
            u64_le align;
        };
    };
    static_assert(sizeof(IoctlAllocSpace) == 24, "IoctlAllocSpace is incorrect size");

    struct IoctlFreeSpace {
        u64_le offset{};
        u32_le pages{};
        u32_le page_size{};
    };
    static_assert(sizeof(IoctlFreeSpace) == 16, "IoctlFreeSpace is incorrect size");

    struct IoctlRemapEntry {
        u16_le flags{};
        u16_le kind{};
        u32_le nvmap_handle{};
        u32_le map_offset{};
        u32_le offset{};
        u32_le pages{};
    };
    static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size");

    struct IoctlMapBufferEx {
        AddressSpaceFlags flags{}; // bit0: fixed_offset, bit2: cacheable
        u32_le kind{};             // -1 is default
        u32_le nvmap_handle{};
        u32_le page_size{};        // 0 means don't care
        s64_le buffer_offset{};
        u64_le mapping_size{};
        s64_le offset{};
    };
    static_assert(sizeof(IoctlMapBufferEx) == 40, "IoctlMapBufferEx is incorrect size");

    struct IoctlUnmapBuffer {
        s64_le offset{};
    };
    static_assert(sizeof(IoctlUnmapBuffer) == 8, "IoctlUnmapBuffer is incorrect size");

    struct IoctlBindChannel {
        s32_le fd{};
    };
    static_assert(sizeof(IoctlBindChannel) == 4, "IoctlBindChannel is incorrect size");

    struct IoctlVaRegion {
        u64_le offset{};
        u32_le page_size{};
        INSERT_PADDING_WORDS(1);
        u64_le pages{};
    };
    static_assert(sizeof(IoctlVaRegion) == 24, "IoctlVaRegion is incorrect size");

    struct IoctlGetVaRegions {
        u64_le buf_addr{}; // (contained output user ptr on linux, ignored)
        u32_le buf_size{}; // forced to 2*sizeof(struct va_region)
        u32_le reserved{};
        IoctlVaRegion regions[2]{};
    };
    static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(IoctlVaRegion) * 2,
                  "IoctlGetVaRegions is incorrect size");

    s32 channel{};

    NvResult InitalizeEx(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult Remap(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult FreeSpace(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult BindChannel(const std::vector<u8>& input, std::vector<u8>& output);

    NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output,
                          std::vector<u8>& inline_output);

    std::optional<BufferMap> FindBufferMap(GPUVAddr gpu_addr) const;
    void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated);
    std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr);

    std::shared_ptr<nvmap> nvmap_dev;

    // This is expected to be ordered, therefore we must use a map, not unordered_map
    std::map<GPUVAddr, BufferMap> buffer_mappings;
};

} // namespace Service::Nvidia::Devices
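DECLARE_ENUM_FLAG_OPERATORS comes from common/common_funcs.h and is not part of this excerpt; it is what lets expressions such as (params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None compile for a scoped enum. The sketch below shows the kind of operators such a macro typically provides; this is an assumption for illustration, not the actual macro body.

// Sketch only: bitwise operators for a scoped flag enum, comparable against None.
constexpr AddressSpaceFlags operator|(AddressSpaceFlags a, AddressSpaceFlags b) {
    return static_cast<AddressSpaceFlags>(static_cast<u32>(a) | static_cast<u32>(b));
}
constexpr AddressSpaceFlags operator&(AddressSpaceFlags a, AddressSpaceFlags b) {
    return static_cast<AddressSpaceFlags>(static_cast<u32>(a) & static_cast<u32>(b));
}
// Usage, as in MapBufferEx above:
//   if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) { ... }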
199  src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp  Executable file
@@ -0,0 +1,199 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <cstdlib>
#include <cstring>

#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl.h"
#include "video_core/gpu.h"

namespace Service::Nvidia::Devices {

nvhost_ctrl::nvhost_ctrl(Core::System& system, EventInterface& events_interface,
                         SyncpointManager& syncpoint_manager)
    : nvdevice(system), events_interface{events_interface}, syncpoint_manager{syncpoint_manager} {}
nvhost_ctrl::~nvhost_ctrl() = default;

NvResult nvhost_ctrl::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) {
    switch (command.group) {
    case 0x0:
        switch (command.cmd) {
        case 0x1b:
            return NvOsGetConfigU32(input, output);
        case 0x1c:
            return IocCtrlClearEventWait(input, output);
        case 0x1d:
            return IocCtrlEventWait(input, output, false);
        case 0x1e:
            return IocCtrlEventWait(input, output, true);
        case 0x1f:
            return IocCtrlEventRegister(input, output);
        case 0x20:
            return IocCtrlEventUnregister(input, output);
        }
        break;
    default:
        break;
    }

    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_ctrl::Ioctl2(Ioctl command, const std::vector<u8>& input,
                             const std::vector<u8>& inline_input, std::vector<u8>& output) {
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_ctrl::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                             std::vector<u8>& inline_output) {
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output) {
    IocGetConfigParams params{};
    std::memcpy(&params, input.data(), sizeof(params));
    LOG_TRACE(Service_NVDRV, "called, setting={}!{}", params.domain_str.data(),
              params.param_str.data());
    return NvResult::ConfigVarNotFound; // Returns an error in production mode
}

NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
                                       bool is_async) {
    IocCtrlEventWaitParams params{};
    std::memcpy(&params, input.data(), sizeof(params));
    LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_async={}",
              params.syncpt_id, params.threshold, params.timeout, is_async);

    if (params.syncpt_id >= MaxSyncPoints) {
        return NvResult::BadParameter;
    }

    u32 event_id = params.value & 0x00FF;

    if (event_id >= MaxNvEvents) {
        std::memcpy(output.data(), &params, sizeof(params));
        return NvResult::BadParameter;
    }

    if (syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) {
        params.value = syncpoint_manager.GetSyncpointMin(params.syncpt_id);
        std::memcpy(output.data(), &params, sizeof(params));
        return NvResult::Success;
    }

    if (const auto new_value = syncpoint_manager.RefreshSyncpoint(params.syncpt_id);
        syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) {
        params.value = new_value;
        std::memcpy(output.data(), &params, sizeof(params));
        return NvResult::Success;
    }

    auto event = events_interface.events[event_id];
    auto& gpu = system.GPU();

    // This mostly accounts for unimplemented features; a synced GPU is always synced.
    if (!gpu.IsAsync()) {
        event.event.writable->Signal();
        return NvResult::Success;
    }
    auto lock = gpu.LockSync();
    const u32 current_syncpoint_value = event.fence.value;
    const s32 diff = current_syncpoint_value - params.threshold;
    if (diff >= 0) {
        event.event.writable->Signal();
        params.value = current_syncpoint_value;
        std::memcpy(output.data(), &params, sizeof(params));
        return NvResult::Success;
    }
    const u32 target_value = current_syncpoint_value - diff;

    if (!is_async) {
        params.value = 0;
    }

    if (params.timeout == 0) {
        std::memcpy(output.data(), &params, sizeof(params));
        return NvResult::Timeout;
    }

    EventState status = events_interface.status[event_id];
    if (event_id < MaxNvEvents || status == EventState::Free || status == EventState::Registered) {
        events_interface.SetEventStatus(event_id, EventState::Waiting);
        events_interface.assigned_syncpt[event_id] = params.syncpt_id;
        events_interface.assigned_value[event_id] = target_value;
        if (is_async) {
            params.value = params.syncpt_id << 4;
        } else {
            params.value = ((params.syncpt_id & 0xfff) << 16) | 0x10000000;
        }
        params.value |= event_id;
        event.event.writable->Clear();
        gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value);
        std::memcpy(output.data(), &params, sizeof(params));
        return NvResult::Timeout;
    }
    std::memcpy(output.data(), &params, sizeof(params));
    return NvResult::BadParameter;
}

NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output) {
    IocCtrlEventRegisterParams params{};
    std::memcpy(&params, input.data(), sizeof(params));
    const u32 event_id = params.user_event_id & 0x00FF;
    LOG_DEBUG(Service_NVDRV, "called, user_event_id: {:X}", event_id);
    if (event_id >= MaxNvEvents) {
        return NvResult::BadParameter;
    }
    if (events_interface.registered[event_id]) {
        return NvResult::BadParameter;
    }
    events_interface.RegisterEvent(event_id);
    return NvResult::Success;
}

NvResult nvhost_ctrl::IocCtrlEventUnregister(const std::vector<u8>& input,
                                             std::vector<u8>& output) {
    IocCtrlEventUnregisterParams params{};
    std::memcpy(&params, input.data(), sizeof(params));
    const u32 event_id = params.user_event_id & 0x00FF;
    LOG_DEBUG(Service_NVDRV, "called, user_event_id: {:X}", event_id);
    if (event_id >= MaxNvEvents) {
        return NvResult::BadParameter;
    }
    if (!events_interface.registered[event_id]) {
        return NvResult::BadParameter;
    }
    events_interface.UnregisterEvent(event_id);
    return NvResult::Success;
}

NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output) {
    IocCtrlEventSignalParams params{};
    std::memcpy(&params, input.data(), sizeof(params));

    u32 event_id = params.event_id & 0x00FF;
    LOG_WARNING(Service_NVDRV, "cleared event wait on, event_id: {:X}", event_id);

    if (event_id >= MaxNvEvents) {
        return NvResult::BadParameter;
    }
    if (events_interface.status[event_id] == EventState::Waiting) {
        events_interface.LiberateEvent(event_id);
    }

    syncpoint_manager.RefreshSyncpoint(events_interface.events[event_id].fence.id);

    return NvResult::Success;
}

} // namespace Service::Nvidia::Devices
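The line `const s32 diff = current_syncpoint_value - params.threshold;` in IocCtrlEventWait is worth a note: syncpoint counters are 32-bit and wrap around, so the "already expired?" test is done on the signed difference rather than with a plain unsigned `current >= threshold` comparison. The values below are hypothetical and only illustrate why that matters.

// Hypothetical values showing the wraparound case:
//   current   = 0x00000002 (counter just wrapped)
//   threshold = 0xFFFFFFF0 (fence target set shortly before the wrap)
// Unsigned compare: 0x2 >= 0xFFFFFFF0 is false, so the wait would never complete.
// Signed difference: s32(0x00000002 - 0xFFFFFFF0) = +18 >= 0, so the fence is
// correctly treated as already signaled.
const u32 current = 0x00000002;
const u32 threshold = 0xFFFFFFF0;
const s32 diff = static_cast<s32>(current - threshold); // +18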
133  src/core/hle/service/nvdrv/devices/nvhost_ctrl.h  Executable file
@@ -0,0 +1,133 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <vector>
#include "common/common_types.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
#include "core/hle/service/nvdrv/nvdrv.h"

namespace Service::Nvidia::Devices {

class nvhost_ctrl final : public nvdevice {
public:
    explicit nvhost_ctrl(Core::System& system, EventInterface& events_interface,
                         SyncpointManager& syncpoint_manager);
    ~nvhost_ctrl() override;

    NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
    NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
                    const std::vector<u8>& inline_input, std::vector<u8>& output) override;
    NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                    std::vector<u8>& inline_output) override;

private:
    struct IocSyncptReadParams {
        u32_le id{};
        u32_le value{};
    };
    static_assert(sizeof(IocSyncptReadParams) == 8, "IocSyncptReadParams is incorrect size");

    struct IocSyncptIncrParams {
        u32_le id{};
    };
    static_assert(sizeof(IocSyncptIncrParams) == 4, "IocSyncptIncrParams is incorrect size");

    struct IocSyncptWaitParams {
        u32_le id{};
        u32_le thresh{};
        s32_le timeout{};
    };
    static_assert(sizeof(IocSyncptWaitParams) == 12, "IocSyncptWaitParams is incorrect size");

    struct IocModuleMutexParams {
        u32_le id{};
        u32_le lock{}; // (0 = unlock and 1 = lock)
    };
    static_assert(sizeof(IocModuleMutexParams) == 8, "IocModuleMutexParams is incorrect size");

    struct IocModuleRegRDWRParams {
        u32_le id{};
        u32_le num_offsets{};
        u32_le block_size{};
        u32_le offsets{};
        u32_le values{};
        u32_le write{};
    };
    static_assert(sizeof(IocModuleRegRDWRParams) == 24, "IocModuleRegRDWRParams is incorrect size");

    struct IocSyncptWaitexParams {
        u32_le id{};
        u32_le thresh{};
        s32_le timeout{};
        u32_le value{};
    };
    static_assert(sizeof(IocSyncptWaitexParams) == 16, "IocSyncptWaitexParams is incorrect size");

    struct IocSyncptReadMaxParams {
        u32_le id{};
        u32_le value{};
    };
    static_assert(sizeof(IocSyncptReadMaxParams) == 8, "IocSyncptReadMaxParams is incorrect size");

    struct IocGetConfigParams {
        std::array<char, 0x41> domain_str{};
        std::array<char, 0x41> param_str{};
        std::array<char, 0x101> config_str{};
    };
    static_assert(sizeof(IocGetConfigParams) == 387, "IocGetConfigParams is incorrect size");

    struct IocCtrlEventSignalParams {
        u32_le event_id{};
    };
    static_assert(sizeof(IocCtrlEventSignalParams) == 4,
                  "IocCtrlEventSignalParams is incorrect size");

    struct IocCtrlEventWaitParams {
        u32_le syncpt_id{};
        u32_le threshold{};
        s32_le timeout{};
        u32_le value{};
    };
    static_assert(sizeof(IocCtrlEventWaitParams) == 16, "IocCtrlEventWaitParams is incorrect size");

    struct IocCtrlEventWaitAsyncParams {
        u32_le syncpt_id{};
        u32_le threshold{};
        u32_le timeout{};
        u32_le value{};
    };
    static_assert(sizeof(IocCtrlEventWaitAsyncParams) == 16,
                  "IocCtrlEventWaitAsyncParams is incorrect size");

    struct IocCtrlEventRegisterParams {
        u32_le user_event_id{};
    };
    static_assert(sizeof(IocCtrlEventRegisterParams) == 4,
                  "IocCtrlEventRegisterParams is incorrect size");

    struct IocCtrlEventUnregisterParams {
        u32_le user_event_id{};
    };
    static_assert(sizeof(IocCtrlEventUnregisterParams) == 4,
                  "IocCtrlEventUnregisterParams is incorrect size");

    struct IocCtrlEventKill {
        u64_le user_events{};
    };
    static_assert(sizeof(IocCtrlEventKill) == 8, "IocCtrlEventKill is incorrect size");

    NvResult NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, bool is_async);
    NvResult IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocCtrlEventUnregister(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output);

    EventInterface& events_interface;
    SyncpointManager& syncpoint_manager;
};

} // namespace Service::Nvidia::Devices
282  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp  Executable file
@@ -0,0 +1,282 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <cstring>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"

namespace Service::Nvidia::Devices {

nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system) : nvdevice(system) {}
nvhost_ctrl_gpu::~nvhost_ctrl_gpu() = default;

NvResult nvhost_ctrl_gpu::Ioctl1(Ioctl command, const std::vector<u8>& input,
                                 std::vector<u8>& output) {
    switch (command.group) {
    case 'G':
        switch (command.cmd) {
        case 0x1:
            return ZCullGetCtxSize(input, output);
        case 0x2:
            return ZCullGetInfo(input, output);
        case 0x3:
            return ZBCSetTable(input, output);
        case 0x4:
            return ZBCQueryTable(input, output);
        case 0x5:
            return GetCharacteristics(input, output);
        case 0x6:
            return GetTPCMasks(input, output);
        case 0x7:
            return FlushL2(input, output);
        case 0x14:
            return GetActiveSlotMask(input, output);
        case 0x1c:
            return GetGpuTime(input, output);
        default:
            break;
        }
        break;
    }
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_ctrl_gpu::Ioctl2(Ioctl command, const std::vector<u8>& input,
                                 const std::vector<u8>& inline_input, std::vector<u8>& output) {
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_ctrl_gpu::Ioctl3(Ioctl command, const std::vector<u8>& input,
                                 std::vector<u8>& output, std::vector<u8>& inline_output) {
    switch (command.group) {
    case 'G':
        switch (command.cmd) {
        case 0x5:
            return GetCharacteristics(input, output, inline_output);
        case 0x6:
            return GetTPCMasks(input, output, inline_output);
        default:
            break;
        }
        break;
    default:
        break;
    }
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_ctrl_gpu::GetCharacteristics(const std::vector<u8>& input,
                                             std::vector<u8>& output) {
    LOG_DEBUG(Service_NVDRV, "called");
    IoctlCharacteristics params{};
    std::memcpy(&params, input.data(), input.size());
    params.gc.arch = 0x120;
    params.gc.impl = 0xb;
    params.gc.rev = 0xa1;
    params.gc.num_gpc = 0x1;
    params.gc.l2_cache_size = 0x40000;
    params.gc.on_board_video_memory_size = 0x0;
    params.gc.num_tpc_per_gpc = 0x2;
    params.gc.bus_type = 0x20;
    params.gc.big_page_size = 0x20000;
    params.gc.compression_page_size = 0x20000;
    params.gc.pde_coverage_bit_count = 0x1B;
    params.gc.available_big_page_sizes = 0x30000;
    params.gc.gpc_mask = 0x1;
    params.gc.sm_arch_sm_version = 0x503;
    params.gc.sm_arch_spa_version = 0x503;
    params.gc.sm_arch_warp_count = 0x80;
    params.gc.gpu_va_bit_count = 0x28;
    params.gc.reserved = 0x0;
    params.gc.flags = 0x55;
    params.gc.twod_class = 0x902D;
    params.gc.threed_class = 0xB197;
    params.gc.compute_class = 0xB1C0;
    params.gc.gpfifo_class = 0xB06F;
    params.gc.inline_to_memory_class = 0xA140;
    params.gc.dma_copy_class = 0xB0B5;
    params.gc.max_fbps_count = 0x1;
    params.gc.fbp_en_mask = 0x0;
    params.gc.max_ltc_per_fbp = 0x2;
    params.gc.max_lts_per_ltc = 0x1;
    params.gc.max_tex_per_tpc = 0x0;
    params.gc.max_gpc_count = 0x1;
    params.gc.rop_l2_en_mask_0 = 0x21D70;
    params.gc.rop_l2_en_mask_1 = 0x0;
    params.gc.chipname = 0x6230326D67;
    params.gc.gr_compbit_store_base_hw = 0x0;
    params.gpu_characteristics_buf_size = 0xA0;
    params.gpu_characteristics_buf_addr = 0xdeadbeef; // Cannot be 0 (UNUSED)
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output,
                                             std::vector<u8>& inline_output) {
    LOG_DEBUG(Service_NVDRV, "called");
    IoctlCharacteristics params{};
    std::memcpy(&params, input.data(), input.size());
    params.gc.arch = 0x120;
    params.gc.impl = 0xb;
    params.gc.rev = 0xa1;
    params.gc.num_gpc = 0x1;
    params.gc.l2_cache_size = 0x40000;
    params.gc.on_board_video_memory_size = 0x0;
    params.gc.num_tpc_per_gpc = 0x2;
    params.gc.bus_type = 0x20;
    params.gc.big_page_size = 0x20000;
    params.gc.compression_page_size = 0x20000;
    params.gc.pde_coverage_bit_count = 0x1B;
    params.gc.available_big_page_sizes = 0x30000;
    params.gc.gpc_mask = 0x1;
    params.gc.sm_arch_sm_version = 0x503;
    params.gc.sm_arch_spa_version = 0x503;
    params.gc.sm_arch_warp_count = 0x80;
    params.gc.gpu_va_bit_count = 0x28;
    params.gc.reserved = 0x0;
    params.gc.flags = 0x55;
    params.gc.twod_class = 0x902D;
    params.gc.threed_class = 0xB197;
    params.gc.compute_class = 0xB1C0;
    params.gc.gpfifo_class = 0xB06F;
    params.gc.inline_to_memory_class = 0xA140;
    params.gc.dma_copy_class = 0xB0B5;
    params.gc.max_fbps_count = 0x1;
    params.gc.fbp_en_mask = 0x0;
    params.gc.max_ltc_per_fbp = 0x2;
    params.gc.max_lts_per_ltc = 0x1;
    params.gc.max_tex_per_tpc = 0x0;
    params.gc.max_gpc_count = 0x1;
    params.gc.rop_l2_en_mask_0 = 0x21D70;
    params.gc.rop_l2_en_mask_1 = 0x0;
    params.gc.chipname = 0x6230326D67;
    params.gc.gr_compbit_store_base_hw = 0x0;
    params.gpu_characteristics_buf_size = 0xA0;
    params.gpu_characteristics_buf_addr = 0xdeadbeef; // Cannot be 0 (UNUSED)

    std::memcpy(output.data(), &params, output.size());
    std::memcpy(inline_output.data(), &params.gc, inline_output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlGpuGetTpcMasksArgs params{};
    std::memcpy(&params, input.data(), input.size());
    LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size);
    if (params.mask_buffer_size != 0) {
        params.tcp_mask = 3;
    }
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output,
                                      std::vector<u8>& inline_output) {
    IoctlGpuGetTpcMasksArgs params{};
    std::memcpy(&params, input.data(), input.size());
    LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size);
    if (params.mask_buffer_size != 0) {
        params.tcp_mask = 3;
    }
    std::memcpy(output.data(), &params, output.size());
    std::memcpy(inline_output.data(), &params.tcp_mask, inline_output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::GetActiveSlotMask(const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_DEBUG(Service_NVDRV, "called");

    IoctlActiveSlotMask params{};
    if (input.size() > 0) {
        std::memcpy(&params, input.data(), input.size());
    }
    params.slot = 0x07;
    params.mask = 0x01;
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::ZCullGetCtxSize(const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_DEBUG(Service_NVDRV, "called");

    IoctlZcullGetCtxSize params{};
    if (input.size() > 0) {
        std::memcpy(&params, input.data(), input.size());
    }
    params.size = 0x1;
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::ZCullGetInfo(const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_DEBUG(Service_NVDRV, "called");

    IoctlNvgpuGpuZcullGetInfoArgs params{};

    if (input.size() > 0) {
        std::memcpy(&params, input.data(), input.size());
    }

    params.width_align_pixels = 0x20;
    params.height_align_pixels = 0x20;
    params.pixel_squares_by_aliquots = 0x400;
    params.aliquot_total = 0x800;
    params.region_byte_multiplier = 0x20;
    params.region_header_size = 0x20;
    params.subregion_header_size = 0xc0;
    params.subregion_width_align_pixels = 0x20;
    params.subregion_height_align_pixels = 0x40;
    params.subregion_count = 0x10;
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::ZBCSetTable(const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_WARNING(Service_NVDRV, "(STUBBED) called");

    IoctlZbcSetTable params{};
    std::memcpy(&params, input.data(), input.size());
    // TODO(ogniK): What does this even actually do?
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::ZBCQueryTable(const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_WARNING(Service_NVDRV, "(STUBBED) called");

    IoctlZbcQueryTable params{};
    std::memcpy(&params, input.data(), input.size());
    // TODO: Implement properly
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::FlushL2(const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_WARNING(Service_NVDRV, "(STUBBED) called");

    IoctlFlushL2 params{};
    std::memcpy(&params, input.data(), input.size());
    // TODO: Implement properly
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::GetGpuTime(const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_DEBUG(Service_NVDRV, "called");

    IoctlGetGpuTime params{};
    std::memcpy(&params, input.data(), input.size());
    params.gpu_time = static_cast<u64_le>(system.CoreTiming().GetGlobalTimeNs().count());
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

} // namespace Service::Nvidia::Devices
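Every handler above follows the same marshalling pattern: memcpy the request bytes into a fixed-size params struct, fill in the fields, then memcpy the struct back into the caller-sized output vector. A generic helper along these lines could factor that pattern out; the names and templates below are only a sketch of the idea, not helpers that exist in this commit.

// Sketch only: the deserialize/serialize pattern used by the handlers above.
template <typename Params>
Params ReadParams(const std::vector<u8>& input) {
    Params params{};
    // The handlers copy input.size() bytes; clamping keeps this sketch safe if the
    // caller-provided buffer is larger than the struct.
    std::memcpy(&params, input.data(), std::min(input.size(), sizeof(Params)));
    return params;
}

template <typename Params>
void WriteParams(const Params& params, std::vector<u8>& output) {
    std::memcpy(output.data(), &params, std::min(output.size(), sizeof(Params)));
}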
160  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h  Executable file
@@ -0,0 +1,160 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <vector>
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"

namespace Service::Nvidia::Devices {

class nvhost_ctrl_gpu final : public nvdevice {
public:
    explicit nvhost_ctrl_gpu(Core::System& system);
    ~nvhost_ctrl_gpu() override;

    NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
    NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
                    const std::vector<u8>& inline_input, std::vector<u8>& output) override;
    NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                    std::vector<u8>& inline_output) override;

private:
    struct IoctlGpuCharacteristics {
        u32_le arch;                       // 0x120 (NVGPU_GPU_ARCH_GM200)
        u32_le impl;                       // 0xB (NVGPU_GPU_IMPL_GM20B)
        u32_le rev;                        // 0xA1 (Revision A1)
        u32_le num_gpc;                    // 0x1
        u64_le l2_cache_size;              // 0x40000
        u64_le on_board_video_memory_size; // 0x0 (not used)
        u32_le num_tpc_per_gpc;            // 0x2
        u32_le bus_type;                   // 0x20 (NVGPU_GPU_BUS_TYPE_AXI)
        u32_le big_page_size;              // 0x20000
        u32_le compression_page_size;      // 0x20000
        u32_le pde_coverage_bit_count;     // 0x1B
        u32_le available_big_page_sizes;   // 0x30000
        u32_le gpc_mask;                   // 0x1
        u32_le sm_arch_sm_version;         // 0x503 (Maxwell Generation 5.0.3?)
        u32_le sm_arch_spa_version;        // 0x503 (Maxwell Generation 5.0.3?)
        u32_le sm_arch_warp_count;         // 0x80
        u32_le gpu_va_bit_count;           // 0x28
        u32_le reserved;                   // NULL
        u64_le flags;                      // 0x55
        u32_le twod_class;                 // 0x902D (FERMI_TWOD_A)
        u32_le threed_class;               // 0xB197 (MAXWELL_B)
        u32_le compute_class;              // 0xB1C0 (MAXWELL_COMPUTE_B)
        u32_le gpfifo_class;               // 0xB06F (MAXWELL_CHANNEL_GPFIFO_A)
        u32_le inline_to_memory_class;     // 0xA140 (KEPLER_INLINE_TO_MEMORY_B)
        u32_le dma_copy_class;             // 0xB0B5 (MAXWELL_DMA_COPY_A)
        u32_le max_fbps_count;             // 0x1
        u32_le fbp_en_mask;                // 0x0 (disabled)
        u32_le max_ltc_per_fbp;            // 0x2
        u32_le max_lts_per_ltc;            // 0x1
        u32_le max_tex_per_tpc;            // 0x0 (not supported)
        u32_le max_gpc_count;              // 0x1
        u32_le rop_l2_en_mask_0;           // 0x21D70 (fuse_status_opt_rop_l2_fbp_r)
        u32_le rop_l2_en_mask_1;           // 0x0
        u64_le chipname;                   // 0x6230326D67 ("gm20b")
        u64_le gr_compbit_store_base_hw;   // 0x0 (not supported)
    };
    static_assert(sizeof(IoctlGpuCharacteristics) == 160,
                  "IoctlGpuCharacteristics is incorrect size");

    struct IoctlCharacteristics {
        u64_le gpu_characteristics_buf_size; // must not be NULL, but gets overwritten with
                                             // 0xA0=max_size
        u64_le gpu_characteristics_buf_addr; // ignored, but must not be NULL
        IoctlGpuCharacteristics gc;
    };
    static_assert(sizeof(IoctlCharacteristics) == 16 + sizeof(IoctlGpuCharacteristics),
                  "IoctlCharacteristics is incorrect size");

    struct IoctlGpuGetTpcMasksArgs {
        u32_le mask_buffer_size{};
        INSERT_PADDING_WORDS(1);
        u64_le mask_buffer_address{};
        u32_le tcp_mask{};
        INSERT_PADDING_WORDS(1);
    };
    static_assert(sizeof(IoctlGpuGetTpcMasksArgs) == 24,
                  "IoctlGpuGetTpcMasksArgs is incorrect size");

    struct IoctlActiveSlotMask {
        u32_le slot; // always 0x07
        u32_le mask;
    };
    static_assert(sizeof(IoctlActiveSlotMask) == 8, "IoctlActiveSlotMask is incorrect size");

    struct IoctlZcullGetCtxSize {
        u32_le size;
    };
    static_assert(sizeof(IoctlZcullGetCtxSize) == 4, "IoctlZcullGetCtxSize is incorrect size");

    struct IoctlNvgpuGpuZcullGetInfoArgs {
        u32_le width_align_pixels;
        u32_le height_align_pixels;
        u32_le pixel_squares_by_aliquots;
        u32_le aliquot_total;
        u32_le region_byte_multiplier;
        u32_le region_header_size;
        u32_le subregion_header_size;
        u32_le subregion_width_align_pixels;
        u32_le subregion_height_align_pixels;
        u32_le subregion_count;
    };
    static_assert(sizeof(IoctlNvgpuGpuZcullGetInfoArgs) == 40,
                  "IoctlNvgpuGpuZcullGetInfoArgs is incorrect size");

    struct IoctlZbcSetTable {
        u32_le color_ds[4];
        u32_le color_l2[4];
        u32_le depth;
        u32_le format;
        u32_le type;
    };
    static_assert(sizeof(IoctlZbcSetTable) == 44, "IoctlZbcSetTable is incorrect size");

    struct IoctlZbcQueryTable {
        u32_le color_ds[4];
        u32_le color_l2[4];
        u32_le depth;
        u32_le ref_cnt;
        u32_le format;
        u32_le type;
        u32_le index_size;
    };
    static_assert(sizeof(IoctlZbcQueryTable) == 52, "IoctlZbcQueryTable is incorrect size");

    struct IoctlFlushL2 {
        u32_le flush; // l2_flush | l2_invalidate << 1 | fb_flush << 2
        u32_le reserved;
    };
    static_assert(sizeof(IoctlFlushL2) == 8, "IoctlFlushL2 is incorrect size");

    struct IoctlGetGpuTime {
        u64_le gpu_time{};
        INSERT_PADDING_WORDS(2);
    };
    static_assert(sizeof(IoctlGetGpuTime) == 0x10, "IoctlGetGpuTime is incorrect size");

    NvResult GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output,
                                std::vector<u8>& inline_output);

    NvResult GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output,
                         std::vector<u8>& inline_output);

    NvResult GetActiveSlotMask(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult ZCullGetCtxSize(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult ZCullGetInfo(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult ZBCSetTable(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult ZBCQueryTable(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult FlushL2(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult GetGpuTime(const std::vector<u8>& input, std::vector<u8>& output);
};

} // namespace Service::Nvidia::Devices
321 src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp Executable file
@@ -0,0 +1,321 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <cstring>
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
|
||||
#include "core/hle/service/nvdrv/syncpoint_manager.h"
|
||||
#include "core/memory.h"
|
||||
#include "video_core/gpu.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
nvhost_gpu::nvhost_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev,
|
||||
SyncpointManager& syncpoint_manager)
|
||||
: nvdevice(system), nvmap_dev(std::move(nvmap_dev)), syncpoint_manager{syncpoint_manager} {
|
||||
channel_fence.id = syncpoint_manager.AllocateSyncpoint();
|
||||
channel_fence.value = system.GPU().GetSyncpointValue(channel_fence.id);
|
||||
}
|
||||
|
||||
nvhost_gpu::~nvhost_gpu() = default;
|
||||
|
||||
NvResult nvhost_gpu::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
switch (command.group) {
|
||||
case 0x0:
|
||||
switch (command.cmd) {
|
||||
case 0x3:
|
||||
return GetWaitbase(input, output);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 'H':
|
||||
switch (command.cmd) {
|
||||
case 0x1:
|
||||
return SetNVMAPfd(input, output);
|
||||
case 0x3:
|
||||
return ChannelSetTimeout(input, output);
|
||||
case 0x8:
|
||||
return SubmitGPFIFOBase(input, output, false);
|
||||
case 0x9:
|
||||
return AllocateObjectContext(input, output);
|
||||
case 0xb:
|
||||
return ZCullBind(input, output);
|
||||
case 0xc:
|
||||
return SetErrorNotifier(input, output);
|
||||
case 0xd:
|
||||
return SetChannelPriority(input, output);
|
||||
case 0x1a:
|
||||
return AllocGPFIFOEx2(input, output);
|
||||
case 0x1b:
|
||||
return SubmitGPFIFOBase(input, output, true);
|
||||
case 0x1d:
|
||||
return ChannelSetTimeslice(input, output);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 'G':
|
||||
switch (command.cmd) {
|
||||
case 0x14:
|
||||
return SetClientData(input, output);
|
||||
case 0x15:
|
||||
return GetClientData(input, output);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) {
|
||||
switch (command.group) {
|
||||
case 'H':
|
||||
switch (command.cmd) {
|
||||
case 0x1b:
|
||||
return SubmitGPFIFOBase(input, inline_input, output);
|
||||
}
|
||||
break;
|
||||
}
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlSetNvmapFD params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
|
||||
|
||||
nvmap_fd = params.nvmap_fd;
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::SetClientData(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
LOG_DEBUG(Service_NVDRV, "called");
|
||||
|
||||
IoctlClientData params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
user_data = params.data;
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::GetClientData(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
LOG_DEBUG(Service_NVDRV, "called");
|
||||
|
||||
IoctlClientData params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
params.data = user_data;
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::ZCullBind(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
std::memcpy(&zcull_params, input.data(), input.size());
|
||||
LOG_DEBUG(Service_NVDRV, "called, gpu_va={:X}, mode={:X}", zcull_params.gpu_va,
|
||||
zcull_params.mode);
|
||||
|
||||
std::memcpy(output.data(), &zcull_params, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::SetErrorNotifier(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlSetErrorNotifier params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called, offset={:X}, size={:X}, mem={:X}", params.offset,
|
||||
params.size, params.mem);
|
||||
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::SetChannelPriority(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
std::memcpy(&channel_priority, input.data(), input.size());
|
||||
LOG_DEBUG(Service_NVDRV, "(STUBBED) called, priority={:X}", channel_priority);
|
||||
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlAllocGpfifoEx2 params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
LOG_WARNING(Service_NVDRV,
|
||||
"(STUBBED) called, num_entries={:X}, flags={:X}, unk0={:X}, "
|
||||
"unk1={:X}, unk2={:X}, unk3={:X}",
|
||||
params.num_entries, params.flags, params.unk0, params.unk1, params.unk2,
|
||||
params.unk3);
|
||||
|
||||
channel_fence.value = system.GPU().GetSyncpointValue(channel_fence.id);
|
||||
|
||||
params.fence_out = channel_fence;
|
||||
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::AllocateObjectContext(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlAllocObjCtx params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called, class_num={:X}, flags={:X}", params.class_num,
|
||||
params.flags);
|
||||
|
||||
params.obj_id = 0x0;
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
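// Builds a pushbuffer that blocks the GPU until syncpoint fence.id reaches fence.value
// (FenceAction Acquire) before any subsequently pushed entries are processed.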
static std::vector<Tegra::CommandHeader> BuildWaitCommandList(Fence fence) {
|
||||
return {
|
||||
Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1,
|
||||
Tegra::SubmissionMode::Increasing),
|
||||
{fence.value},
|
||||
Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1,
|
||||
Tegra::SubmissionMode::Increasing),
|
||||
Tegra::GPU::FenceAction::Build(Tegra::GPU::FenceOperation::Acquire, fence.id),
|
||||
};
|
||||
}
|
||||
|
||||
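// Builds a pushbuffer that increments syncpoint fence.id add_increment times.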
static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(Fence fence, u32 add_increment) {
|
||||
std::vector<Tegra::CommandHeader> result{
|
||||
Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1,
|
||||
Tegra::SubmissionMode::Increasing),
|
||||
{}};
|
||||
|
||||
for (u32 count = 0; count < add_increment; ++count) {
|
||||
result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1,
|
||||
Tegra::SubmissionMode::Increasing));
|
||||
result.emplace_back(
|
||||
Tegra::GPU::FenceAction::Build(Tegra::GPU::FenceOperation::Increment, fence.id));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
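// Same as BuildIncrementCommandList, but prefixed with a wait-for-interrupt so the
// increments are only signalled once previously submitted work has finished.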
static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(Fence fence,
|
||||
u32 add_increment) {
|
||||
std::vector<Tegra::CommandHeader> result{
|
||||
Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForInterrupt, 1,
|
||||
Tegra::SubmissionMode::Increasing),
|
||||
{}};
|
||||
const std::vector<Tegra::CommandHeader> increment{
|
||||
BuildIncrementCommandList(fence, add_increment)};
|
||||
|
||||
result.insert(result.end(), increment.begin(), increment.end());
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector<u8>& output,
|
||||
Tegra::CommandList&& entries) {
|
||||
LOG_TRACE(Service_NVDRV, "called, gpfifo={:X}, num_entries={:X}, flags={:X}", params.address,
|
||||
params.num_entries, params.flags.raw);
|
||||
|
||||
auto& gpu = system.GPU();
|
||||
|
||||
params.fence_out.id = channel_fence.id;
|
||||
|
||||
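// If the guest requested a wait and the fence has not yet been reached, push a wait
// command list ahead of the actual GPFIFO entries.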
if (params.flags.add_wait.Value() &&
|
||||
!syncpoint_manager.IsSyncpointExpired(params.fence_out.id, params.fence_out.value)) {
|
||||
gpu.PushGPUEntries(Tegra::CommandList{BuildWaitCommandList(params.fence_out)});
|
||||
}
|
||||
|
||||
if (params.flags.add_increment.Value() || params.flags.increment.Value()) {
|
||||
const u32 increment_value = params.flags.increment.Value() ? params.fence_out.value : 0;
|
||||
params.fence_out.value = syncpoint_manager.IncreaseSyncpoint(
|
||||
params.fence_out.id, params.AddIncrementValue() + increment_value);
|
||||
} else {
|
||||
params.fence_out.value = syncpoint_manager.GetSyncpointMax(params.fence_out.id);
|
||||
}
|
||||
|
||||
gpu.PushGPUEntries(std::move(entries));
|
||||
|
||||
if (params.flags.add_increment.Value()) {
|
||||
if (params.flags.suppress_wfi) {
|
||||
gpu.PushGPUEntries(Tegra::CommandList{
|
||||
BuildIncrementCommandList(params.fence_out, params.AddIncrementValue())});
|
||||
} else {
|
||||
gpu.PushGPUEntries(Tegra::CommandList{
|
||||
BuildIncrementWithWfiCommandList(params.fence_out, params.AddIncrementValue())});
|
||||
}
|
||||
}
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(IoctlSubmitGpfifo));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::SubmitGPFIFOBase(const std::vector<u8>& input, std::vector<u8>& output,
|
||||
bool kickoff) {
|
||||
if (input.size() < sizeof(IoctlSubmitGpfifo)) {
|
||||
UNIMPLEMENTED();
|
||||
return NvResult::InvalidSize;
|
||||
}
|
||||
IoctlSubmitGpfifo params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlSubmitGpfifo));
|
||||
Tegra::CommandList entries(params.num_entries);
|
||||
|
||||
if (kickoff) {
|
||||
system.Memory().ReadBlock(params.address, entries.command_lists.data(),
|
||||
params.num_entries * sizeof(Tegra::CommandListHeader));
|
||||
} else {
|
||||
std::memcpy(entries.command_lists.data(), &input[sizeof(IoctlSubmitGpfifo)],
|
||||
params.num_entries * sizeof(Tegra::CommandListHeader));
|
||||
}
|
||||
|
||||
return SubmitGPFIFOImpl(params, output, std::move(entries));
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::SubmitGPFIFOBase(const std::vector<u8>& input,
|
||||
const std::vector<u8>& input_inline,
|
||||
std::vector<u8>& output) {
|
||||
if (input.size() < sizeof(IoctlSubmitGpfifo)) {
|
||||
UNIMPLEMENTED();
|
||||
return NvResult::InvalidSize;
|
||||
}
|
||||
IoctlSubmitGpfifo params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlSubmitGpfifo));
|
||||
Tegra::CommandList entries(params.num_entries);
|
||||
std::memcpy(entries.command_lists.data(), input_inline.data(), input_inline.size());
|
||||
return SubmitGPFIFOImpl(params, output, std::move(entries));
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlGetWaitbase params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlGetWaitbase));
|
||||
LOG_INFO(Service_NVDRV, "called, unknown=0x{:X}", params.unknown);
|
||||
|
||||
params.value = 0; // Seems to be hard coded at 0
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlChannelSetTimeout params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlChannelSetTimeout));
|
||||
LOG_INFO(Service_NVDRV, "called, timeout=0x{:X}", params.timeout);
|
||||
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlSetTimeslice params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlSetTimeslice));
|
||||
LOG_INFO(Service_NVDRV, "called, timeslice=0x{:X}", params.timeslice);
|
||||
|
||||
channel_timeslice = params.timeslice;
|
||||
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
195 src/core/hle/service/nvdrv/devices/nvhost_gpu.h Executable file
@@ -0,0 +1,195 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/swap.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvdevice.h"
|
||||
#include "core/hle/service/nvdrv/nvdata.h"
|
||||
#include "video_core/dma_pusher.h"
|
||||
|
||||
namespace Service::Nvidia {
|
||||
class SyncpointManager;
|
||||
}
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
class nvmap;
|
||||
class nvhost_gpu final : public nvdevice {
|
||||
public:
|
||||
explicit nvhost_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev,
|
||||
SyncpointManager& syncpoint_manager);
|
||||
~nvhost_gpu() override;
|
||||
|
||||
NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
|
||||
NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) override;
|
||||
NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) override;
|
||||
|
||||
private:
|
||||
enum class CtxObjects : u32_le {
|
||||
Ctx2D = 0x902D,
|
||||
Ctx3D = 0xB197,
|
||||
CtxCompute = 0xB1C0,
|
||||
CtxKepler = 0xA140,
|
||||
CtxDMA = 0xB0B5,
|
||||
CtxChannelGPFIFO = 0xB06F,
|
||||
};
|
||||
|
||||
struct IoctlSetNvmapFD {
|
||||
s32_le nvmap_fd{};
|
||||
};
|
||||
static_assert(sizeof(IoctlSetNvmapFD) == 4, "IoctlSetNvmapFD is incorrect size");
|
||||
|
||||
struct IoctlChannelSetTimeout {
|
||||
u32_le timeout{};
|
||||
};
|
||||
static_assert(sizeof(IoctlChannelSetTimeout) == 4, "IoctlChannelSetTimeout is incorrect size");
|
||||
|
||||
struct IoctlAllocGPFIFO {
|
||||
u32_le num_entries{};
|
||||
u32_le flags{};
|
||||
};
|
||||
static_assert(sizeof(IoctlAllocGPFIFO) == 8, "IoctlAllocGPFIFO is incorrect size");
|
||||
|
||||
struct IoctlClientData {
|
||||
u64_le data{};
|
||||
};
|
||||
static_assert(sizeof(IoctlClientData) == 8, "IoctlClientData is incorrect size");
|
||||
|
||||
struct IoctlZCullBind {
|
||||
u64_le gpu_va{};
|
||||
u32_le mode{}; // 0=global, 1=no_ctxsw, 2=separate_buffer, 3=part_of_regular_buf
|
||||
INSERT_PADDING_WORDS(1);
|
||||
};
|
||||
static_assert(sizeof(IoctlZCullBind) == 16, "IoctlZCullBind is incorrect size");
|
||||
|
||||
struct IoctlSetErrorNotifier {
|
||||
u64_le offset{};
|
||||
u64_le size{};
|
||||
u32_le mem{}; // nvmap object handle
|
||||
INSERT_PADDING_WORDS(1);
|
||||
};
|
||||
static_assert(sizeof(IoctlSetErrorNotifier) == 24, "IoctlSetErrorNotifier is incorrect size");
|
||||
|
||||
struct IoctlChannelSetPriority {
|
||||
u32_le priority{};
|
||||
};
|
||||
static_assert(sizeof(IoctlChannelSetPriority) == 4,
|
||||
"IoctlChannelSetPriority is incorrect size");
|
||||
|
||||
struct IoctlSetTimeslice {
|
||||
u32_le timeslice{};
|
||||
};
|
||||
static_assert(sizeof(IoctlSetTimeslice) == 4, "IoctlSetTimeslice is incorrect size");
|
||||
|
||||
struct IoctlEventIdControl {
|
||||
u32_le cmd{}; // 0=disable, 1=enable, 2=clear
|
||||
u32_le id{};
|
||||
};
|
||||
static_assert(sizeof(IoctlEventIdControl) == 8, "IoctlEventIdControl is incorrect size");
|
||||
|
||||
struct IoctlGetErrorNotification {
|
||||
u64_le timestamp{};
|
||||
u32_le info32{};
|
||||
u16_le info16{};
|
||||
u16_le status{}; // always 0xFFFF
|
||||
};
|
||||
static_assert(sizeof(IoctlGetErrorNotification) == 16,
|
||||
"IoctlGetErrorNotification is incorrect size");
|
||||
|
||||
static_assert(sizeof(Fence) == 8, "Fence is incorrect size");
|
||||
|
||||
struct IoctlAllocGpfifoEx {
|
||||
u32_le num_entries{};
|
||||
u32_le flags{};
|
||||
u32_le unk0{};
|
||||
u32_le unk1{};
|
||||
u32_le unk2{};
|
||||
u32_le unk3{};
|
||||
u32_le unk4{};
|
||||
u32_le unk5{};
|
||||
};
|
||||
static_assert(sizeof(IoctlAllocGpfifoEx) == 32, "IoctlAllocGpfifoEx is incorrect size");
|
||||
|
||||
struct IoctlAllocGpfifoEx2 {
|
||||
u32_le num_entries{}; // in
|
||||
u32_le flags{}; // in
|
||||
u32_le unk0{}; // in (1 works)
|
||||
Fence fence_out{}; // out
|
||||
u32_le unk1{}; // in
|
||||
u32_le unk2{}; // in
|
||||
u32_le unk3{}; // in
|
||||
};
|
||||
static_assert(sizeof(IoctlAllocGpfifoEx2) == 32, "IoctlAllocGpfifoEx2 is incorrect size");
|
||||
|
||||
struct IoctlAllocObjCtx {
|
||||
u32_le class_num{}; // 0x902D=2d, 0xB197=3d, 0xB1C0=compute, 0xA140=kepler, 0xB0B5=DMA,
|
||||
// 0xB06F=channel_gpfifo
|
||||
u32_le flags{};
|
||||
u64_le obj_id{}; // (ignored) used for FREE_OBJ_CTX ioctl, which is not supported
|
||||
};
|
||||
static_assert(sizeof(IoctlAllocObjCtx) == 16, "IoctlAllocObjCtx is incorrect size");
|
||||
|
||||
struct IoctlSubmitGpfifo {
|
||||
u64_le address{}; // pointer to gpfifo entry structs
|
||||
u32_le num_entries{}; // number of fence objects being submitted
|
||||
union {
|
||||
u32_le raw;
|
||||
BitField<0, 1, u32_le> add_wait; // append a wait sync_point to the list
|
||||
BitField<1, 1, u32_le> add_increment; // append an increment to the list
|
||||
BitField<2, 1, u32_le> new_hw_format; // mostly ignored
|
||||
BitField<4, 1, u32_le> suppress_wfi; // suppress wait for interrupt
|
||||
BitField<8, 1, u32_le> increment; // increment the returned fence
|
||||
} flags;
|
||||
Fence fence_out{}; // returned new fence object for others to wait on
|
||||
|
||||
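// When add_increment is set, two syncpoint increments are appended to the submission
// (hence the shift by one below).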
u32 AddIncrementValue() const {
|
||||
return flags.add_increment.Value() << 1;
|
||||
}
|
||||
};
|
||||
static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(Fence),
|
||||
"IoctlSubmitGpfifo is incorrect size");
|
||||
|
||||
struct IoctlGetWaitbase {
|
||||
u32 unknown{}; // seems to be ignored? Nintendo added this
|
||||
u32 value{};
|
||||
};
|
||||
static_assert(sizeof(IoctlGetWaitbase) == 8, "IoctlGetWaitbase is incorrect size");
|
||||
|
||||
s32_le nvmap_fd{};
|
||||
u64_le user_data{};
|
||||
IoctlZCullBind zcull_params{};
|
||||
u32_le channel_priority{};
|
||||
u32_le channel_timeslice{};
|
||||
|
||||
NvResult SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult SetClientData(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult GetClientData(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult ZCullBind(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult SetErrorNotifier(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult SetChannelPriority(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult AllocateObjectContext(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector<u8>& output,
|
||||
Tegra::CommandList&& entries);
|
||||
NvResult SubmitGPFIFOBase(const std::vector<u8>& input, std::vector<u8>& output,
|
||||
bool kickoff = false);
|
||||
NvResult SubmitGPFIFOBase(const std::vector<u8>& input, const std::vector<u8>& input_inline,
|
||||
std::vector<u8>& output);
|
||||
NvResult GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
|
||||
std::shared_ptr<nvmap> nvmap_dev;
|
||||
SyncpointManager& syncpoint_manager;
|
||||
Fence channel_fence;
|
||||
};
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
73 src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp Executable file
@@ -0,0 +1,73 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
nvhost_nvdec::nvhost_nvdec(Core::System& system, std::shared_ptr<nvmap> nvmap_dev)
|
||||
: nvhost_nvdec_common(system, std::move(nvmap_dev)) {}
|
||||
nvhost_nvdec::~nvhost_nvdec() = default;
|
||||
|
||||
NvResult nvhost_nvdec::Ioctl1(Ioctl command, const std::vector<u8>& input,
|
||||
std::vector<u8>& output) {
|
||||
switch (command.group) {
|
||||
case 0x0:
|
||||
switch (command.cmd) {
|
||||
case 0x1:
|
||||
return Submit(input, output);
|
||||
case 0x2:
|
||||
return GetSyncpoint(input, output);
|
||||
case 0x3:
|
||||
return GetWaitbase(input, output);
|
||||
case 0x7:
|
||||
return SetSubmitTimeout(input, output);
|
||||
case 0x9:
|
||||
return MapBuffer(input, output);
|
||||
case 0xa: {
|
||||
if (command.length == 0x1c) {
|
||||
LOG_INFO(Service_NVDRV, "NVDEC video stream ended");
|
||||
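// Push a 0xDEADB33F sentinel entry so the command buffer processor can detect that the
// stream has ended (inferred from the cache-invalidate call below).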
Tegra::ChCommandHeaderList cmdlist(1);
|
||||
cmdlist[0] = Tegra::ChCommandHeader{0xDEADB33F};
|
||||
system.GPU().PushCommandBuffer(cmdlist);
|
||||
system.GPU().MemoryManager().InvalidateQueuedCaches();
|
||||
}
|
||||
return UnmapBuffer(input, output);
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 'H':
|
||||
switch (command.cmd) {
|
||||
case 0x1:
|
||||
return SetNVMAPfd(input);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec::Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
24 src/core/hle/service/nvdrv/devices/nvhost_nvdec.h Executable file
@@ -0,0 +1,24 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"

namespace Service::Nvidia::Devices {

class nvhost_nvdec final : public nvhost_nvdec_common {
public:
explicit nvhost_nvdec(Core::System& system, std::shared_ptr<nvmap> nvmap_dev);
~nvhost_nvdec() override;

NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
const std::vector<u8>& inline_input, std::vector<u8>& output) override;
NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
std::vector<u8>& inline_output) override;
};

} // namespace Service::Nvidia::Devices
235 src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp Executable file
@@ -0,0 +1,235 @@
|
||||
// Copyright 2020 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstring>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvmap.h"
|
||||
#include "core/memory.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
namespace {
|
||||
// SpliceVectors copies count elements of type T from the input buffer at the given offset
// into dst and returns the offset advanced past the copied data.
|
||||
template <typename T>
|
||||
std::size_t SpliceVectors(const std::vector<u8>& input, std::vector<T>& dst, std::size_t count,
|
||||
std::size_t offset) {
|
||||
std::memcpy(dst.data(), input.data() + offset, count * sizeof(T));
|
||||
offset += count * sizeof(T);
|
||||
return offset;
|
||||
}
|
||||
|
||||
// WriteVectors writes the contents of src into dst at the given offset and returns the
// advanced offset.
|
||||
template <typename T>
|
||||
std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::size_t offset) {
|
||||
std::memcpy(dst.data() + offset, src.data(), src.size() * sizeof(T));
|
||||
offset += src.size() * sizeof(T);
|
||||
return offset;
|
||||
}
|
||||
} // Anonymous namespace
|
||||
|
||||
nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system, std::shared_ptr<nvmap> nvmap_dev)
|
||||
: nvdevice(system), nvmap_dev(std::move(nvmap_dev)) {}
|
||||
nvhost_nvdec_common::~nvhost_nvdec_common() = default;
|
||||
|
||||
NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
|
||||
IoctlSetNvmapFD params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlSetNvmapFD));
|
||||
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
|
||||
|
||||
nvmap_fd = params.nvmap_fd;
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec_common::Submit(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlSubmit params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlSubmit));
|
||||
LOG_DEBUG(Service_NVDRV, "called NVDEC Submit, cmd_buffer_count={}", params.cmd_buffer_count);
|
||||
|
||||
// Instantiate param buffers
|
||||
std::size_t offset = sizeof(IoctlSubmit);
|
||||
std::vector<CommandBuffer> command_buffers(params.cmd_buffer_count);
|
||||
std::vector<Reloc> relocs(params.relocation_count);
|
||||
std::vector<u32> reloc_shifts(params.relocation_count);
|
||||
std::vector<SyncptIncr> syncpt_increments(params.syncpoint_count);
|
||||
std::vector<SyncptIncr> wait_checks(params.syncpoint_count);
|
||||
std::vector<Fence> fences(params.fence_count);
|
||||
|
||||
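// The input buffer is laid out as the IoctlSubmit header followed by the command-buffer,
// relocation, relocation-shift, syncpoint-increment, wait-check and fence arrays, each
// sized by the counts read above.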
// Splice input into their respective buffers
|
||||
offset = SpliceVectors(input, command_buffers, params.cmd_buffer_count, offset);
|
||||
offset = SpliceVectors(input, relocs, params.relocation_count, offset);
|
||||
offset = SpliceVectors(input, reloc_shifts, params.relocation_count, offset);
|
||||
offset = SpliceVectors(input, syncpt_increments, params.syncpoint_count, offset);
|
||||
offset = SpliceVectors(input, wait_checks, params.syncpoint_count, offset);
|
||||
offset = SpliceVectors(input, fences, params.fence_count, offset);
|
||||
|
||||
// TODO(ameerj): For async gpu, utilize fences for syncpoint 'max' increment
|
||||
|
||||
auto& gpu = system.GPU();
|
||||
|
||||
for (const auto& cmd_buffer : command_buffers) {
|
||||
auto object = nvmap_dev->GetObject(cmd_buffer.memory_id);
|
||||
ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
|
||||
const auto map = FindBufferMap(object->dma_map_addr);
|
||||
if (!map) {
|
||||
LOG_ERROR(Service_NVDRV, "Tried to submit an invalid offset 0x{:X} dma 0x{:X}",
|
||||
object->addr, object->dma_map_addr);
|
||||
return NvResult::Success;
|
||||
}
|
||||
Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
|
||||
gpu.MemoryManager().ReadBlock(map->StartAddr() + cmd_buffer.offset, cmdlist.data(),
|
||||
cmdlist.size() * sizeof(u32));
|
||||
gpu.PushCommandBuffer(cmdlist);
|
||||
}
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(IoctlSubmit));
|
||||
// Some games expect command_buffers to be written back
|
||||
offset = sizeof(IoctlSubmit);
|
||||
offset = WriteVectors(output, command_buffers, offset);
|
||||
offset = WriteVectors(output, relocs, offset);
|
||||
offset = WriteVectors(output, reloc_shifts, offset);
|
||||
offset = WriteVectors(output, syncpt_increments, offset);
|
||||
offset = WriteVectors(output, wait_checks, offset);
|
||||
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlGetSyncpoint params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlGetSyncpoint));
|
||||
LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param);
|
||||
|
||||
// We found that implementing this causes deadlocks with async gpu, along with degraded
|
||||
// performance. TODO: RE the nvdec async implementation
|
||||
params.value = 0;
|
||||
std::memcpy(output.data(), ¶ms, sizeof(IoctlGetSyncpoint));
|
||||
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec_common::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlGetWaitbase params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlGetWaitbase));
|
||||
params.value = 0; // Seems to be hard coded at 0
|
||||
std::memcpy(output.data(), ¶ms, sizeof(IoctlGetWaitbase));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlMapBuffer params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlMapBuffer));
|
||||
std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
|
||||
|
||||
SpliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
|
||||
|
||||
auto& gpu = system.GPU();
|
||||
|
||||
for (auto& cmf_buff : cmd_buffer_handles) {
|
||||
auto object{nvmap_dev->GetObject(cmf_buff.map_handle)};
|
||||
if (!object) {
|
||||
LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmf_buff.map_handle);
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::InvalidState;
|
||||
}
|
||||
if (object->dma_map_addr == 0) {
|
||||
// NVDEC and VIC memory is in the 32-bit address space
|
||||
// MapAllocate32 will attempt to map a lower 32-bit value in the shared gpu memory space
|
||||
const GPUVAddr low_addr = gpu.MemoryManager().MapAllocate32(object->addr, object->size);
|
||||
object->dma_map_addr = static_cast<u32>(low_addr);
|
||||
// Ensure that the dma_map_addr is indeed in the lower 32-bit address space.
|
||||
ASSERT(object->dma_map_addr == low_addr);
|
||||
}
|
||||
if (!object->dma_map_addr) {
|
||||
LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size);
|
||||
} else {
|
||||
cmf_buff.map_address = object->dma_map_addr;
|
||||
AddBufferMap(object->dma_map_addr, object->size, object->addr,
|
||||
object->status == nvmap::Object::Status::Allocated);
|
||||
}
|
||||
}
|
||||
std::memcpy(output.data(), ¶ms, sizeof(IoctlMapBuffer));
|
||||
std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(),
|
||||
cmd_buffer_handles.size() * sizeof(MapBufferEntry));
|
||||
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlMapBuffer params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlMapBuffer));
|
||||
std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
|
||||
SpliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
|
||||
|
||||
auto& gpu = system.GPU();
|
||||
|
||||
for (auto& cmf_buff : cmd_buffer_handles) {
|
||||
const auto object{nvmap_dev->GetObject(cmf_buff.map_handle)};
|
||||
if (!object) {
|
||||
LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmf_buff.map_handle);
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::InvalidState;
|
||||
}
|
||||
if (const auto size{RemoveBufferMap(object->dma_map_addr)}; size) {
|
||||
if (vic_device) {
|
||||
// UnmapVicFrame defers texture_cache invalidation of the frame address until
|
||||
// the stream is over
|
||||
gpu.MemoryManager().UnmapVicFrame(object->dma_map_addr, *size);
|
||||
} else {
|
||||
gpu.MemoryManager().Unmap(object->dma_map_addr, *size);
|
||||
}
|
||||
} else {
|
||||
// This occurs quite frequently, however does not seem to impact functionality
|
||||
LOG_DEBUG(Service_NVDRV, "invalid offset=0x{:X} dma=0x{:X}", object->addr,
|
||||
object->dma_map_addr);
|
||||
}
|
||||
object->dma_map_addr = 0;
|
||||
}
|
||||
std::memset(output.data(), 0, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec_common::SetSubmitTimeout(const std::vector<u8>& input,
|
||||
std::vector<u8>& output) {
|
||||
std::memcpy(&submit_timeout, input.data(), input.size());
|
||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
std::optional<nvhost_nvdec_common::BufferMap> nvhost_nvdec_common::FindBufferMap(
|
||||
GPUVAddr gpu_addr) const {
|
||||
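// Look through the mappings that start at or below gpu_addr for one whose
// [StartAddr, EndAddr) range contains it.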
const auto it = std::find_if(
|
||||
buffer_mappings.begin(), buffer_mappings.upper_bound(gpu_addr), [&](const auto& entry) {
|
||||
return (gpu_addr >= entry.second.StartAddr() && gpu_addr < entry.second.EndAddr());
|
||||
});
|
||||
|
||||
if (it == buffer_mappings.end()) {
return std::nullopt;
}
return it->second;
|
||||
}
|
||||
|
||||
void nvhost_nvdec_common::AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr,
|
||||
bool is_allocated) {
|
||||
buffer_mappings.insert_or_assign(gpu_addr, BufferMap{gpu_addr, size, cpu_addr, is_allocated});
|
||||
}
|
||||
|
||||
std::optional<std::size_t> nvhost_nvdec_common::RemoveBufferMap(GPUVAddr gpu_addr) {
|
||||
const auto iter{buffer_mappings.find(gpu_addr)};
|
||||
if (iter == buffer_mappings.end()) {
|
||||
return std::nullopt;
|
||||
}
|
||||
std::size_t size = 0;
|
||||
if (iter->second.IsAllocated()) {
|
||||
size = iter->second.Size();
|
||||
}
|
||||
buffer_mappings.erase(iter);
|
||||
return size;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
165 src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h Executable file
@@ -0,0 +1,165 @@
|
||||
// Copyright 2020 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <map>
|
||||
#include <optional>
#include <vector>
|
||||
#include "common/common_types.h"
|
||||
#include "common/swap.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvdevice.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
class nvmap;
|
||||
|
||||
class nvhost_nvdec_common : public nvdevice {
|
||||
public:
|
||||
explicit nvhost_nvdec_common(Core::System& system, std::shared_ptr<nvmap> nvmap_dev);
|
||||
~nvhost_nvdec_common() override;
|
||||
|
||||
protected:
|
||||
class BufferMap final {
|
||||
public:
|
||||
constexpr BufferMap() = default;
|
||||
|
||||
constexpr BufferMap(GPUVAddr start_addr, std::size_t size)
|
||||
: start_addr{start_addr}, end_addr{start_addr + size} {}
|
||||
|
||||
constexpr BufferMap(GPUVAddr start_addr, std::size_t size, VAddr cpu_addr,
|
||||
bool is_allocated)
|
||||
: start_addr{start_addr}, end_addr{start_addr + size}, cpu_addr{cpu_addr},
|
||||
is_allocated{is_allocated} {}
|
||||
|
||||
constexpr VAddr StartAddr() const {
|
||||
return start_addr;
|
||||
}
|
||||
|
||||
constexpr VAddr EndAddr() const {
|
||||
return end_addr;
|
||||
}
|
||||
|
||||
constexpr std::size_t Size() const {
|
||||
return end_addr - start_addr;
|
||||
}
|
||||
|
||||
constexpr VAddr CpuAddr() const {
|
||||
return cpu_addr;
|
||||
}
|
||||
|
||||
constexpr bool IsAllocated() const {
|
||||
return is_allocated;
|
||||
}
|
||||
|
||||
private:
|
||||
GPUVAddr start_addr{};
|
||||
GPUVAddr end_addr{};
|
||||
VAddr cpu_addr{};
|
||||
bool is_allocated{};
|
||||
};
|
||||
|
||||
struct IoctlSetNvmapFD {
|
||||
s32_le nvmap_fd{};
|
||||
};
|
||||
static_assert(sizeof(IoctlSetNvmapFD) == 4, "IoctlSetNvmapFD is incorrect size");
|
||||
|
||||
struct IoctlSubmitCommandBuffer {
|
||||
u32_le id{};
|
||||
u32_le offset{};
|
||||
u32_le count{};
|
||||
};
|
||||
static_assert(sizeof(IoctlSubmitCommandBuffer) == 0xC,
|
||||
"IoctlSubmitCommandBuffer is incorrect size");
|
||||
struct IoctlSubmit {
|
||||
u32_le cmd_buffer_count{};
|
||||
u32_le relocation_count{};
|
||||
u32_le syncpoint_count{};
|
||||
u32_le fence_count{};
|
||||
};
|
||||
static_assert(sizeof(IoctlSubmit) == 0x10, "IoctlSubmit has incorrect size");
|
||||
|
||||
struct CommandBuffer {
|
||||
s32 memory_id{};
|
||||
u32 offset{};
|
||||
s32 word_count{};
|
||||
};
|
||||
static_assert(sizeof(CommandBuffer) == 0xC, "CommandBuffer has incorrect size");
|
||||
|
||||
struct Reloc {
|
||||
s32 cmdbuffer_memory{};
|
||||
s32 cmdbuffer_offset{};
|
||||
s32 target{};
|
||||
s32 target_offset{};
|
||||
};
|
||||
static_assert(sizeof(Reloc) == 0x10, "Reloc has incorrect size");
|
||||
|
||||
struct SyncptIncr {
|
||||
u32 id{};
|
||||
u32 increments{};
|
||||
};
|
||||
static_assert(sizeof(SyncptIncr) == 0x8, "SyncptIncr has incorrect size");
|
||||
|
||||
struct Fence {
|
||||
u32 id{};
|
||||
u32 value{};
|
||||
};
|
||||
static_assert(sizeof(Fence) == 0x8, "Fence has incorrect size");
|
||||
|
||||
struct IoctlGetSyncpoint {
|
||||
// Input
|
||||
u32_le param{};
|
||||
// Output
|
||||
u32_le value{};
|
||||
};
|
||||
static_assert(sizeof(IoctlGetSyncpoint) == 8, "IoctlGetSyncpoint has incorrect size");
|
||||
|
||||
struct IoctlGetWaitbase {
|
||||
u32_le unknown{}; // seems to be ignored? Nintendo added this
|
||||
u32_le value{};
|
||||
};
|
||||
static_assert(sizeof(IoctlGetWaitbase) == 0x8, "IoctlGetWaitbase is incorrect size");
|
||||
|
||||
struct IoctlMapBuffer {
|
||||
u32_le num_entries{};
|
||||
u32_le data_address{}; // Ignored by the driver.
|
||||
u32_le attach_host_ch_das{};
|
||||
};
|
||||
static_assert(sizeof(IoctlMapBuffer) == 0x0C, "IoctlMapBuffer is incorrect size");
|
||||
|
||||
struct IocGetIdParams {
|
||||
// Input
|
||||
u32_le param{};
|
||||
// Output
|
||||
u32_le value{};
|
||||
};
|
||||
static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");
|
||||
|
||||
// Used for mapping and unmapping command buffers
|
||||
struct MapBufferEntry {
|
||||
u32_le map_handle{};
|
||||
u32_le map_address{};
|
||||
};
|
||||
static_assert(sizeof(MapBufferEntry) == 0x8, "MapBufferEntry is incorrect size");
|
||||
|
||||
/// Ioctl command implementations
|
||||
NvResult SetNVMAPfd(const std::vector<u8>& input);
|
||||
NvResult Submit(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult GetSyncpoint(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult MapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
|
||||
std::optional<BufferMap> FindBufferMap(GPUVAddr gpu_addr) const;
|
||||
void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated);
|
||||
std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr);
|
||||
|
||||
s32_le nvmap_fd{};
|
||||
u32_le submit_timeout{};
|
||||
bool vic_device{};
|
||||
std::shared_ptr<nvmap> nvmap_dev;
|
||||
|
||||
// This is expected to be ordered, therefore we must use a map, not unordered_map
|
||||
std::map<GPUVAddr, BufferMap> buffer_mappings;
|
||||
};
|
||||
} // namespace Service::Nvidia::Devices
|
56 src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp Executable file
@@ -0,0 +1,56 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <cstring>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
nvhost_nvjpg::nvhost_nvjpg(Core::System& system) : nvdevice(system) {}
|
||||
nvhost_nvjpg::~nvhost_nvjpg() = default;
|
||||
|
||||
NvResult nvhost_nvjpg::Ioctl1(Ioctl command, const std::vector<u8>& input,
|
||||
std::vector<u8>& output) {
|
||||
switch (command.group) {
|
||||
case 'H':
|
||||
switch (command.cmd) {
|
||||
case 0x1:
|
||||
return SetNVMAPfd(input, output);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvjpg::Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvjpg::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvjpg::SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlSetNvmapFD params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
|
||||
|
||||
nvmap_fd = params.nvmap_fd;
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
36 src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h Executable file
@@ -0,0 +1,36 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
#include "common/common_types.h"
|
||||
#include "common/swap.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvdevice.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
class nvhost_nvjpg final : public nvdevice {
|
||||
public:
|
||||
explicit nvhost_nvjpg(Core::System& system);
|
||||
~nvhost_nvjpg() override;
|
||||
|
||||
NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
|
||||
NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) override;
|
||||
NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) override;
|
||||
|
||||
private:
|
||||
struct IoctlSetNvmapFD {
|
||||
s32_le nvmap_fd{};
|
||||
};
|
||||
static_assert(sizeof(IoctlSetNvmapFD) == 4, "IoctlSetNvmapFD is incorrect size");
|
||||
|
||||
s32_le nvmap_fd{};
|
||||
|
||||
NvResult SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
};
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
65 src/core/hle/service/nvdrv/devices/nvhost_vic.cpp Executable file
@@ -0,0 +1,65 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvhost_vic.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
nvhost_vic::nvhost_vic(Core::System& system, std::shared_ptr<nvmap> nvmap_dev)
|
||||
: nvhost_nvdec_common(system, std::move(nvmap_dev)) {
|
||||
vic_device = true;
|
||||
}
|
||||
nvhost_vic::~nvhost_vic() = default;
|
||||
|
||||
NvResult nvhost_vic::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
switch (command.group) {
|
||||
case 0x0:
|
||||
switch (command.cmd) {
|
||||
case 0x1:
|
||||
return Submit(input, output);
|
||||
case 0x2:
|
||||
return GetSyncpoint(input, output);
|
||||
case 0x3:
|
||||
return GetWaitbase(input, output);
|
||||
case 0x9:
|
||||
return MapBuffer(input, output);
|
||||
case 0xa:
|
||||
return UnmapBuffer(input, output);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 'H':
|
||||
switch (command.cmd) {
|
||||
case 0x1:
|
||||
return SetNVMAPfd(input);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_vic::Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_vic::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
23 src/core/hle/service/nvdrv/devices/nvhost_vic.h Executable file
@@ -0,0 +1,23 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"

namespace Service::Nvidia::Devices {
class nvmap;

class nvhost_vic final : public nvhost_nvdec_common {
public:
explicit nvhost_vic(Core::System& system, std::shared_ptr<nvmap> nvmap_dev);
~nvhost_vic() override;

NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
const std::vector<u8>& inline_input, std::vector<u8>& output) override;
NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
std::vector<u8>& inline_output) override;
};
} // namespace Service::Nvidia::Devices
278 src/core/hle/service/nvdrv/devices/nvmap.cpp Executable file
@@ -0,0 +1,278 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstring>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvmap.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
nvmap::nvmap(Core::System& system) : nvdevice(system) {
|
||||
// Handle 0 appears to be used when remapping, so we create a placeholder empty nvmap object to
|
||||
// represent this.
|
||||
CreateObject(0);
|
||||
}
|
||||
|
||||
nvmap::~nvmap() = default;
|
||||
|
||||
NvResult nvmap::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
switch (command.group) {
|
||||
case 0x1:
|
||||
switch (command.cmd) {
|
||||
case 0x1:
|
||||
return IocCreate(input, output);
|
||||
case 0x3:
|
||||
return IocFromId(input, output);
|
||||
case 0x4:
|
||||
return IocAlloc(input, output);
|
||||
case 0x5:
|
||||
return IocFree(input, output);
|
||||
case 0x9:
|
||||
return IocParam(input, output);
|
||||
case 0xe:
|
||||
return IocGetId(input, output);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvmap::Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvmap::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
VAddr nvmap::GetObjectAddress(u32 handle) const {
|
||||
auto object = GetObject(handle);
|
||||
ASSERT(object);
|
||||
ASSERT(object->status == Object::Status::Allocated);
|
||||
return object->addr;
|
||||
}
|
||||
|
||||
u32 nvmap::CreateObject(u32 size) {
|
||||
// Create a new nvmap object and obtain a handle to it.
|
||||
auto object = std::make_shared<Object>();
|
||||
object->id = next_id++;
|
||||
object->size = size;
|
||||
object->status = Object::Status::Created;
|
||||
object->refcount = 1;
|
||||
|
||||
const u32 handle = next_handle++;
|
||||
|
||||
handles.insert_or_assign(handle, std::move(object));
|
||||
|
||||
return handle;
|
||||
}
|
||||
|
||||
NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IocCreateParams params;
|
||||
std::memcpy(¶ms, input.data(), sizeof(params));
|
||||
LOG_DEBUG(Service_NVDRV, "size=0x{:08X}", params.size);
|
||||
|
||||
if (!params.size) {
|
||||
LOG_ERROR(Service_NVDRV, "Size is 0");
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
params.handle = CreateObject(params.size);
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IocAllocParams params;
|
||||
std::memcpy(¶ms, input.data(), sizeof(params));
|
||||
LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.addr);
|
||||
|
||||
if (!params.handle) {
|
||||
LOG_ERROR(Service_NVDRV, "Handle is 0");
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
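// Alignment must be a power of two: for a power of two, (align - 1) & align == 0.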
if ((params.align - 1) & params.align) {
|
||||
LOG_ERROR(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
const u32 min_alignment = 0x1000;
|
||||
if (params.align < min_alignment) {
|
||||
params.align = min_alignment;
|
||||
}
|
||||
|
||||
auto object = GetObject(params.handle);
|
||||
if (!object) {
|
||||
LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
if (object->status == Object::Status::Allocated) {
|
||||
LOG_ERROR(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle);
|
||||
return NvResult::InsufficientMemory;
|
||||
}
|
||||
|
||||
object->flags = params.flags;
|
||||
object->align = params.align;
|
||||
object->kind = params.kind;
|
||||
object->addr = params.addr;
|
||||
object->status = Object::Status::Allocated;
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IocGetIdParams params;
|
||||
std::memcpy(¶ms, input.data(), sizeof(params));
|
||||
|
||||
LOG_WARNING(Service_NVDRV, "called");
|
||||
|
||||
if (!params.handle) {
|
||||
LOG_ERROR(Service_NVDRV, "Handle is zero");
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
auto object = GetObject(params.handle);
|
||||
if (!object) {
|
||||
LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
params.id = object->id;
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvmap::IocFromId(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IocFromIdParams params;
|
||||
std::memcpy(¶ms, input.data(), sizeof(params));
|
||||
|
||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
|
||||
|
||||
auto itr = std::find_if(handles.begin(), handles.end(),
|
||||
[&](const auto& entry) { return entry.second->id == params.id; });
|
||||
if (itr == handles.end()) {
|
||||
LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
auto& object = itr->second;
|
||||
if (object->status != Object::Status::Allocated) {
|
||||
LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
itr->second->refcount++;
|
||||
|
||||
// Return the existing handle instead of creating a new one.
|
||||
params.handle = itr->first;
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
enum class ParamTypes { Size = 1, Alignment = 2, Base = 3, Heap = 4, Kind = 5, Compr = 6 };
|
||||
|
||||
IocParamParams params;
|
||||
std::memcpy(¶ms, input.data(), sizeof(params));
|
||||
|
||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called type={}", params.param);
|
||||
|
||||
auto object = GetObject(params.handle);
|
||||
if (!object) {
|
||||
LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
if (object->status != Object::Status::Allocated) {
|
||||
LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
switch (static_cast<ParamTypes>(params.param)) {
|
||||
case ParamTypes::Size:
|
||||
params.result = object->size;
|
||||
break;
|
||||
case ParamTypes::Alignment:
|
||||
params.result = object->align;
|
||||
break;
|
||||
case ParamTypes::Heap:
|
||||
// TODO(Subv): Seems to be a hardcoded value?
|
||||
params.result = 0x40000000;
|
||||
break;
|
||||
case ParamTypes::Kind:
|
||||
params.result = object->kind;
|
||||
break;
|
||||
default:
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
// TODO(Subv): These flags are unconfirmed.
|
||||
enum FreeFlags {
|
||||
Freed = 0,
|
||||
NotFreedYet = 1,
|
||||
};
|
||||
|
||||
IocFreeParams params;
|
||||
std::memcpy(¶ms, input.data(), sizeof(params));
|
||||
|
||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
|
||||
|
||||
auto itr = handles.find(params.handle);
|
||||
if (itr == handles.end()) {
|
||||
LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
if (!itr->second->refcount) {
|
||||
LOG_ERROR(
|
||||
Service_NVDRV,
|
||||
"There is no references to this object. The object is already freed. handle={:08X}",
|
||||
params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
itr->second->refcount--;
|
||||
|
||||
params.size = itr->second->size;
|
||||
|
||||
if (itr->second->refcount == 0) {
|
||||
params.flags = Freed;
|
||||
// The address of the nvmap is written to the output if we're finally freeing it, otherwise
|
||||
// 0 is written.
|
||||
params.address = itr->second->addr;
|
||||
} else {
|
||||
params.flags = NotFreedYet;
|
||||
params.address = 0;
|
||||
}
|
||||
|
||||
handles.erase(params.handle);
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
127
src/core/hle/service/nvdrv/devices/nvmap.h
Executable file
@@ -0,0 +1,127 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <unordered_map>
#include <vector>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"

namespace Service::Nvidia::Devices {

class nvmap final : public nvdevice {
public:
    explicit nvmap(Core::System& system);
    ~nvmap() override;

    NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
    NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
                    const std::vector<u8>& inline_input, std::vector<u8>& output) override;
    NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                    std::vector<u8>& inline_output) override;

    /// Returns the allocated address of an nvmap object given its handle.
    VAddr GetObjectAddress(u32 handle) const;

    /// Represents an nvmap object.
    struct Object {
        enum class Status { Created, Allocated };
        u32 id;
        u32 size;
        u32 flags;
        u32 align;
        u8 kind;
        VAddr addr;
        Status status;
        u32 refcount;
        u32 dma_map_addr;
    };

    std::shared_ptr<Object> GetObject(u32 handle) const {
        auto itr = handles.find(handle);
        if (itr != handles.end()) {
            return itr->second;
        }
        return {};
    }

private:
    /// Id to use for the next handle that is created.
    u32 next_handle = 0;

    /// Id to use for the next object that is created.
    u32 next_id = 0;

    /// Mapping of currently allocated handles to the objects they represent.
    std::unordered_map<u32, std::shared_ptr<Object>> handles;

    struct IocCreateParams {
        // Input
        u32_le size{};
        // Output
        u32_le handle{};
    };
    static_assert(sizeof(IocCreateParams) == 8, "IocCreateParams has wrong size");

    struct IocFromIdParams {
        // Input
        u32_le id{};
        // Output
        u32_le handle{};
    };
    static_assert(sizeof(IocFromIdParams) == 8, "IocFromIdParams has wrong size");

    struct IocAllocParams {
        // Input
        u32_le handle{};
        u32_le heap_mask{};
        u32_le flags{};
        u32_le align{};
        u8 kind{};
        INSERT_PADDING_BYTES(7);
        u64_le addr{};
    };
    static_assert(sizeof(IocAllocParams) == 32, "IocAllocParams has wrong size");

    struct IocFreeParams {
        u32_le handle{};
        INSERT_PADDING_BYTES(4);
        u64_le address{};
        u32_le size{};
        u32_le flags{};
    };
    static_assert(sizeof(IocFreeParams) == 24, "IocFreeParams has wrong size");

    struct IocParamParams {
        // Input
        u32_le handle{};
        u32_le param{};
        // Output
        u32_le result{};
    };
    static_assert(sizeof(IocParamParams) == 12, "IocParamParams has wrong size");

    struct IocGetIdParams {
        // Output
        u32_le id{};
        // Input
        u32_le handle{};
    };
    static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");

    u32 CreateObject(u32 size);

    NvResult IocCreate(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocAlloc(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocGetId(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocFromId(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocParam(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocFree(const std::vector<u8>& input, std::vector<u8>& output);
};

} // namespace Service::Nvidia::Devices
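
Every nvmap ioctl handler above follows the same marshalling pattern: the guest packs one of the Ioc*Params structs into a byte buffer, the handler memcpy's it out, validates the handle, and memcpy's the partially overwritten struct back into the output buffer. A minimal sketch of that packing/unpacking step, using a stand-in struct since the real Ioc*Params types are private to nvmap (illustrative only, not part of this commit):

// Illustrative sketch only: the param-struct marshalling pattern used by every
// nvmap::Ioc* handler above. LocalParams mirrors the shape of IocParamParams
// (two inputs, one output); it is not the real private type.
#include <cstdint>
#include <cstring>
#include <vector>

struct LocalParams {
    std::uint32_t handle; // in: nvmap handle to query
    std::uint32_t param;  // in: which property (size, alignment, ...)
    std::uint32_t result; // out: filled by the handler
};

std::vector<std::uint8_t> PackParams(const LocalParams& params) {
    std::vector<std::uint8_t> buffer(sizeof(params));
    std::memcpy(buffer.data(), &params, sizeof(params)); // same memcpy-in step as the handlers
    return buffer;
}

LocalParams UnpackParams(const std::vector<std::uint8_t>& buffer) {
    LocalParams params{};
    std::memcpy(&params, buffer.data(), sizeof(params)); // same memcpy-out step as the handlers
    return params;
}

The same shape applies to IocCreateParams, IocFreeParams and the rest, since all of them travel through the ioctl input/output vectors as raw little-endian bytes.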
248
src/core/hle/service/nvdrv/interface.cpp
Executable file
@@ -0,0 +1,248 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <cinttypes>
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/service/nvdrv/interface.h"
#include "core/hle/service/nvdrv/nvdata.h"
#include "core/hle/service/nvdrv/nvdrv.h"

namespace Service::Nvidia {

void NVDRV::SignalGPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) {
    nvdrv->SignalSyncpt(syncpoint_id, value);
}

void NVDRV::Open(Kernel::HLERequestContext& ctx) {
    LOG_DEBUG(Service_NVDRV, "called");

    if (!is_initialized) {
        ServiceError(ctx, NvResult::NotInitialized);
        LOG_ERROR(Service_NVDRV, "NvServices is not initialized!");
        return;
    }

    const auto& buffer = ctx.ReadBuffer();
    const std::string device_name(buffer.begin(), buffer.end());
    DeviceFD fd = nvdrv->Open(device_name);

    IPC::ResponseBuilder rb{ctx, 4};
    rb.Push(RESULT_SUCCESS);
    rb.Push<DeviceFD>(fd);
    rb.PushEnum(fd != INVALID_NVDRV_FD ? NvResult::Success : NvResult::FileOperationFailed);
}

void NVDRV::ServiceError(Kernel::HLERequestContext& ctx, NvResult result) {
    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(RESULT_SUCCESS);
    rb.PushEnum(result);
}

void NVDRV::Ioctl1(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};
    const auto fd = rp.Pop<DeviceFD>();
    const auto command = rp.PopRaw<Ioctl>();
    LOG_DEBUG(Service_NVDRV, "called fd={}, ioctl=0x{:08X}", fd, command.raw);

    if (!is_initialized) {
        ServiceError(ctx, NvResult::NotInitialized);
        LOG_ERROR(Service_NVDRV, "NvServices is not initialized!");
        return;
    }

    // Check device
    std::vector<u8> output_buffer(ctx.GetWriteBufferSize(0));
    const auto input_buffer = ctx.ReadBuffer(0);

    const auto nv_result = nvdrv->Ioctl1(fd, command, input_buffer, output_buffer);
    if (command.is_out != 0) {
        ctx.WriteBuffer(output_buffer);
    }

    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(RESULT_SUCCESS);
    rb.PushEnum(nv_result);
}

void NVDRV::Ioctl2(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};
    const auto fd = rp.Pop<DeviceFD>();
    const auto command = rp.PopRaw<Ioctl>();
    LOG_DEBUG(Service_NVDRV, "called fd={}, ioctl=0x{:08X}", fd, command.raw);

    if (!is_initialized) {
        ServiceError(ctx, NvResult::NotInitialized);
        LOG_ERROR(Service_NVDRV, "NvServices is not initialized!");
        return;
    }

    const auto input_buffer = ctx.ReadBuffer(0);
    const auto input_inlined_buffer = ctx.ReadBuffer(1);
    std::vector<u8> output_buffer(ctx.GetWriteBufferSize(0));

    const auto nv_result =
        nvdrv->Ioctl2(fd, command, input_buffer, input_inlined_buffer, output_buffer);
    if (command.is_out != 0) {
        ctx.WriteBuffer(output_buffer);
    }

    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(RESULT_SUCCESS);
    rb.PushEnum(nv_result);
}

void NVDRV::Ioctl3(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};
    const auto fd = rp.Pop<DeviceFD>();
    const auto command = rp.PopRaw<Ioctl>();
    LOG_DEBUG(Service_NVDRV, "called fd={}, ioctl=0x{:08X}", fd, command.raw);

    if (!is_initialized) {
        ServiceError(ctx, NvResult::NotInitialized);
        LOG_ERROR(Service_NVDRV, "NvServices is not initialized!");
        return;
    }

    const auto input_buffer = ctx.ReadBuffer(0);
    std::vector<u8> output_buffer(ctx.GetWriteBufferSize(0));
    std::vector<u8> output_buffer_inline(ctx.GetWriteBufferSize(1));

    const auto nv_result =
        nvdrv->Ioctl3(fd, command, input_buffer, output_buffer, output_buffer_inline);
    if (command.is_out != 0) {
        ctx.WriteBuffer(output_buffer, 0);
        ctx.WriteBuffer(output_buffer_inline, 1);
    }

    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(RESULT_SUCCESS);
    rb.PushEnum(nv_result);
}

void NVDRV::Close(Kernel::HLERequestContext& ctx) {
    LOG_DEBUG(Service_NVDRV, "called");

    if (!is_initialized) {
        ServiceError(ctx, NvResult::NotInitialized);
        LOG_ERROR(Service_NVDRV, "NvServices is not initialized!");
        return;
    }

    IPC::RequestParser rp{ctx};
    const auto fd = rp.Pop<DeviceFD>();
    const auto result = nvdrv->Close(fd);

    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(RESULT_SUCCESS);
    rb.PushEnum(result);
}

void NVDRV::Initialize(Kernel::HLERequestContext& ctx) {
    LOG_WARNING(Service_NVDRV, "(STUBBED) called");

    is_initialized = true;

    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(RESULT_SUCCESS);
    rb.PushEnum(NvResult::Success);
}

void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};
    const auto fd = rp.Pop<DeviceFD>();
    const auto event_id = rp.Pop<u32>() & 0x00FF;
    LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}, event_id={:X}", fd, event_id);

    if (!is_initialized) {
        ServiceError(ctx, NvResult::NotInitialized);
        LOG_ERROR(Service_NVDRV, "NvServices is not initialized!");
        return;
    }

    const auto nv_result = nvdrv->VerifyFD(fd);
    if (nv_result != NvResult::Success) {
        LOG_ERROR(Service_NVDRV, "Invalid FD specified DeviceFD={}!", fd);
        ServiceError(ctx, nv_result);
        return;
    }

    if (event_id < MaxNvEvents) {
        IPC::ResponseBuilder rb{ctx, 3, 1};
        rb.Push(RESULT_SUCCESS);
        auto event = nvdrv->GetEvent(event_id);
        event->Clear();
        rb.PushCopyObjects(event);
        rb.PushEnum(NvResult::Success);
    } else {
        IPC::ResponseBuilder rb{ctx, 3};
        rb.Push(RESULT_SUCCESS);
        rb.PushEnum(NvResult::BadParameter);
    }
}

void NVDRV::SetAruid(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};
    pid = rp.Pop<u64>();
    LOG_WARNING(Service_NVDRV, "(STUBBED) called, pid=0x{:X}", pid);

    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(RESULT_SUCCESS);
    rb.PushEnum(NvResult::Success);
}

void NVDRV::SetGraphicsFirmwareMemoryMarginEnabled(Kernel::HLERequestContext& ctx) {
    LOG_WARNING(Service_NVDRV, "(STUBBED) called");

    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(RESULT_SUCCESS);
}

void NVDRV::GetStatus(Kernel::HLERequestContext& ctx) {
    LOG_WARNING(Service_NVDRV, "(STUBBED) called");

    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(RESULT_SUCCESS);
    rb.PushEnum(NvResult::Success);
}

void NVDRV::DumpGraphicsMemoryInfo(Kernel::HLERequestContext& ctx) {
    // According to SwitchBrew, this has no inputs and no outputs, so effectively does nothing on
    // retail hardware.
    LOG_DEBUG(Service_NVDRV, "called");

    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(RESULT_SUCCESS);
}

NVDRV::NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char* name)
    : ServiceFramework{system_, name}, nvdrv{std::move(nvdrv_)} {
    static const FunctionInfo functions[] = {
        {0, &NVDRV::Open, "Open"},
        {1, &NVDRV::Ioctl1, "Ioctl"},
        {2, &NVDRV::Close, "Close"},
        {3, &NVDRV::Initialize, "Initialize"},
        {4, &NVDRV::QueryEvent, "QueryEvent"},
        {5, nullptr, "MapSharedMem"},
        {6, &NVDRV::GetStatus, "GetStatus"},
        {7, nullptr, "SetAruidForTest"},
        {8, &NVDRV::SetAruid, "SetAruid"},
        {9, &NVDRV::DumpGraphicsMemoryInfo, "DumpGraphicsMemoryInfo"},
        {10, nullptr, "InitializeDevtools"},
        {11, &NVDRV::Ioctl2, "Ioctl2"},
        {12, &NVDRV::Ioctl3, "Ioctl3"},
        {13, &NVDRV::SetGraphicsFirmwareMemoryMarginEnabled,
         "SetGraphicsFirmwareMemoryMarginEnabled"},
    };
    RegisterHandlers(functions);
}

NVDRV::~NVDRV() = default;

} // namespace Service::Nvidia
45
src/core/hle/service/nvdrv/interface.h
Executable file
@@ -0,0 +1,45 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include "core/hle/service/nvdrv/nvdrv.h"
#include "core/hle/service/service.h"

namespace Kernel {
class WritableEvent;
}

namespace Service::Nvidia {

class NVDRV final : public ServiceFramework<NVDRV> {
public:
    explicit NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char* name);
    ~NVDRV() override;

    void SignalGPUInterruptSyncpt(u32 syncpoint_id, u32 value);

private:
    void Open(Kernel::HLERequestContext& ctx);
    void Ioctl1(Kernel::HLERequestContext& ctx);
    void Ioctl2(Kernel::HLERequestContext& ctx);
    void Ioctl3(Kernel::HLERequestContext& ctx);
    void Close(Kernel::HLERequestContext& ctx);
    void Initialize(Kernel::HLERequestContext& ctx);
    void QueryEvent(Kernel::HLERequestContext& ctx);
    void SetAruid(Kernel::HLERequestContext& ctx);
    void SetGraphicsFirmwareMemoryMarginEnabled(Kernel::HLERequestContext& ctx);
    void GetStatus(Kernel::HLERequestContext& ctx);
    void DumpGraphicsMemoryInfo(Kernel::HLERequestContext& ctx);

    void ServiceError(Kernel::HLERequestContext& ctx, NvResult result);

    std::shared_ptr<Module> nvdrv;

    u64 pid{};
    bool is_initialized{};
};

} // namespace Service::Nvidia
100
src/core/hle/service/nvdrv/nvdata.h
Executable file
@@ -0,0 +1,100 @@
#pragma once

#include <array>
#include "common/bit_field.h"
#include "common/common_types.h"

namespace Service::Nvidia {

constexpr u32 MaxSyncPoints = 192;
constexpr u32 MaxNvEvents = 64;
using DeviceFD = s32;

constexpr DeviceFD INVALID_NVDRV_FD = -1;

struct Fence {
    s32 id;
    u32 value;
};

static_assert(sizeof(Fence) == 8, "Fence has wrong size");

struct MultiFence {
    u32 num_fences;
    std::array<Fence, 4> fences;
};

enum class NvResult : u32 {
    Success = 0x0,
    NotImplemented = 0x1,
    NotSupported = 0x2,
    NotInitialized = 0x3,
    BadParameter = 0x4,
    Timeout = 0x5,
    InsufficientMemory = 0x6,
    ReadOnlyAttribute = 0x7,
    InvalidState = 0x8,
    InvalidAddress = 0x9,
    InvalidSize = 0xA,
    BadValue = 0xB,
    AlreadyAllocated = 0xD,
    Busy = 0xE,
    ResourceError = 0xF,
    CountMismatch = 0x10,
    OverFlow = 0x11,
    InsufficientTransferMemory = 0x1000,
    InsufficientVideoMemory = 0x10000,
    BadSurfaceColorScheme = 0x10001,
    InvalidSurface = 0x10002,
    SurfaceNotSupported = 0x10003,
    DispInitFailed = 0x20000,
    DispAlreadyAttached = 0x20001,
    DispTooManyDisplays = 0x20002,
    DispNoDisplaysAttached = 0x20003,
    DispModeNotSupported = 0x20004,
    DispNotFound = 0x20005,
    DispAttachDissallowed = 0x20006,
    DispTypeNotSupported = 0x20007,
    DispAuthenticationFailed = 0x20008,
    DispNotAttached = 0x20009,
    DispSamePwrState = 0x2000A,
    DispEdidFailure = 0x2000B,
    DispDsiReadAckError = 0x2000C,
    DispDsiReadInvalidResp = 0x2000D,
    FileWriteFailed = 0x30000,
    FileReadFailed = 0x30001,
    EndOfFile = 0x30002,
    FileOperationFailed = 0x30003,
    DirOperationFailed = 0x30004,
    EndOfDirList = 0x30005,
    ConfigVarNotFound = 0x30006,
    InvalidConfigVar = 0x30007,
    LibraryNotFound = 0x30008,
    SymbolNotFound = 0x30009,
    MemoryMapFailed = 0x3000A,
    IoctlFailed = 0x3000F,
    AccessDenied = 0x30010,
    DeviceNotFound = 0x30011,
    KernelDriverNotFound = 0x30012,
    FileNotFound = 0x30013,
    PathAlreadyExists = 0x30014,
    ModuleNotPresent = 0xA000E,
};

enum class EventState {
    Free = 0,
    Registered = 1,
    Waiting = 2,
    Busy = 3,
};

union Ioctl {
    u32_le raw;
    BitField<0, 8, u32> cmd;
    BitField<8, 8, u32> group;
    BitField<16, 14, u32> length;
    BitField<30, 1, u32> is_in;
    BitField<31, 1, u32> is_out;
};

} // namespace Service::Nvidia
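
The Ioctl union above splits the raw 32-bit command word into a command number (bits 0-7), a device group (bits 8-15), a payload length (bits 16-29), and two direction flags (bits 30-31). A standalone sketch of the same decode using plain shifts and masks, with a made-up sample value (illustrative only; the real code reads these fields through BitField):

// Illustrative sketch only: decoding an ioctl word with shifts/masks, mirroring the
// BitField layout of the Ioctl union above. The sample value is hypothetical.
#include <cstdint>
#include <cstdio>

int main() {
    const std::uint32_t raw = 0xC0080101u;               // made-up command word
    const unsigned cmd = raw & 0xFFu;                     // bits 0-7
    const unsigned group = (raw >> 8) & 0xFFu;            // bits 8-15
    const unsigned length = (raw >> 16) & 0x3FFFu;        // bits 16-29, payload size in bytes
    const bool is_in = ((raw >> 30) & 1u) != 0;           // payload flows guest -> driver
    const bool is_out = ((raw >> 31) & 1u) != 0;          // payload flows driver -> guest
    std::printf("cmd=%u group=%u length=%u in=%d out=%d\n", cmd, group, length, is_in, is_out);
    return 0;
}

For the sample word this prints cmd=1, group=1, length=8, with both direction bits set, which is why the interface.cpp handlers only write the output buffer back when is_out is non-zero.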
180
src/core/hle/service/nvdrv/nvdrv.cpp
Executable file
@@ -0,0 +1,180 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <utility>

#include <fmt/format.h>
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"
#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h"
#include "core/hle/service/nvdrv/devices/nvhost_vic.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/hle/service/nvdrv/interface.h"
#include "core/hle/service/nvdrv/nvdrv.h"
#include "core/hle/service/nvdrv/nvmemp.h"
#include "core/hle/service/nvdrv/syncpoint_manager.h"
#include "core/hle/service/nvflinger/nvflinger.h"

namespace Service::Nvidia {

void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger,
                       Core::System& system) {
    auto module_ = std::make_shared<Module>(system);
    std::make_shared<NVDRV>(system, module_, "nvdrv")->InstallAsService(service_manager);
    std::make_shared<NVDRV>(system, module_, "nvdrv:a")->InstallAsService(service_manager);
    std::make_shared<NVDRV>(system, module_, "nvdrv:s")->InstallAsService(service_manager);
    std::make_shared<NVDRV>(system, module_, "nvdrv:t")->InstallAsService(service_manager);
    std::make_shared<NVMEMP>(system)->InstallAsService(service_manager);
    nvflinger.SetNVDrvInstance(module_);
}

Module::Module(Core::System& system) : syncpoint_manager{system.GPU()} {
    auto& kernel = system.Kernel();
    for (u32 i = 0; i < MaxNvEvents; i++) {
        std::string event_label = fmt::format("NVDRV::NvEvent_{}", i);
        events_interface.events[i] = {Kernel::WritableEvent::CreateEventPair(kernel, event_label)};
        events_interface.status[i] = EventState::Free;
        events_interface.registered[i] = false;
    }
    auto nvmap_dev = std::make_shared<Devices::nvmap>(system);
    devices["/dev/nvhost-as-gpu"] = std::make_shared<Devices::nvhost_as_gpu>(system, nvmap_dev);
    devices["/dev/nvhost-gpu"] =
        std::make_shared<Devices::nvhost_gpu>(system, nvmap_dev, syncpoint_manager);
    devices["/dev/nvhost-ctrl-gpu"] = std::make_shared<Devices::nvhost_ctrl_gpu>(system);
    devices["/dev/nvmap"] = nvmap_dev;
    devices["/dev/nvdisp_disp0"] = std::make_shared<Devices::nvdisp_disp0>(system, nvmap_dev);
    devices["/dev/nvhost-ctrl"] =
        std::make_shared<Devices::nvhost_ctrl>(system, events_interface, syncpoint_manager);
    devices["/dev/nvhost-nvdec"] = std::make_shared<Devices::nvhost_nvdec>(system, nvmap_dev);
    devices["/dev/nvhost-nvjpg"] = std::make_shared<Devices::nvhost_nvjpg>(system);
    devices["/dev/nvhost-vic"] = std::make_shared<Devices::nvhost_vic>(system, nvmap_dev);
}

Module::~Module() = default;

NvResult Module::VerifyFD(DeviceFD fd) const {
    if (fd < 0) {
        LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd);
        return NvResult::InvalidState;
    }

    if (open_files.find(fd) == open_files.end()) {
        LOG_ERROR(Service_NVDRV, "Could not find DeviceFD={}!", fd);
        return NvResult::NotImplemented;
    }

    return NvResult::Success;
}

DeviceFD Module::Open(const std::string& device_name) {
    if (devices.find(device_name) == devices.end()) {
        LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name);
        return INVALID_NVDRV_FD;
    }

    auto device = devices[device_name];
    const DeviceFD fd = next_fd++;

    open_files[fd] = std::move(device);

    return fd;
}

NvResult Module::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                        std::vector<u8>& output) {
    if (fd < 0) {
        LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd);
        return NvResult::InvalidState;
    }

    const auto itr = open_files.find(fd);

    if (itr == open_files.end()) {
        LOG_ERROR(Service_NVDRV, "Could not find DeviceFD={}!", fd);
        return NvResult::NotImplemented;
    }

    return itr->second->Ioctl1(command, input, output);
}

NvResult Module::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                        const std::vector<u8>& inline_input, std::vector<u8>& output) {
    if (fd < 0) {
        LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd);
        return NvResult::InvalidState;
    }

    const auto itr = open_files.find(fd);

    if (itr == open_files.end()) {
        LOG_ERROR(Service_NVDRV, "Could not find DeviceFD={}!", fd);
        return NvResult::NotImplemented;
    }

    return itr->second->Ioctl2(command, input, inline_input, output);
}

NvResult Module::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                        std::vector<u8>& output, std::vector<u8>& inline_output) {
    if (fd < 0) {
        LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd);
        return NvResult::InvalidState;
    }

    const auto itr = open_files.find(fd);

    if (itr == open_files.end()) {
        LOG_ERROR(Service_NVDRV, "Could not find DeviceFD={}!", fd);
        return NvResult::NotImplemented;
    }

    return itr->second->Ioctl3(command, input, output, inline_output);
}

NvResult Module::Close(DeviceFD fd) {
    if (fd < 0) {
        LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd);
        return NvResult::InvalidState;
    }

    const auto itr = open_files.find(fd);

    if (itr == open_files.end()) {
        LOG_ERROR(Service_NVDRV, "Could not find DeviceFD={}!", fd);
        return NvResult::NotImplemented;
    }

    open_files.erase(itr);

    return NvResult::Success;
}

void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) {
    for (u32 i = 0; i < MaxNvEvents; i++) {
        if (events_interface.assigned_syncpt[i] == syncpoint_id &&
            events_interface.assigned_value[i] == value) {
            events_interface.LiberateEvent(i);
            events_interface.events[i].event.writable->Signal();
        }
    }
}

std::shared_ptr<Kernel::ReadableEvent> Module::GetEvent(const u32 event_id) const {
    return events_interface.events[event_id].event.readable;
}

std::shared_ptr<Kernel::WritableEvent> Module::GetEventWriteable(const u32 event_id) const {
    return events_interface.events[event_id].event.writable;
}

} // namespace Service::Nvidia
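
Module is effectively a small file-descriptor table: Open resolves a device-node path to a new DeviceFD, the Ioctl1/2/3 overloads look the fd up and forward to the device, and Close erases the entry. A minimal sketch of that lifecycle from a caller's point of view (illustrative only, not part of this commit; the command word and buffer sizes are placeholders, and u8 is the project's common_types alias):

// Illustrative sketch only: the open -> ioctl -> close lifecycle on Module.
// Assumes an already-constructed Service::Nvidia::Module and the project's headers.
#include <vector>
#include "core/hle/service/nvdrv/nvdrv.h"

void ExerciseFdLifecycle(Service::Nvidia::Module& module) {
    const auto fd = module.Open("/dev/nvmap");
    if (fd == Service::Nvidia::INVALID_NVDRV_FD) {
        return; // unknown device node
    }

    Service::Nvidia::Ioctl command{};
    command.raw = 0; // placeholder command word, not a real nvmap request
    std::vector<u8> input(8);
    std::vector<u8> output(8);
    const auto result = module.Ioctl1(fd, command, input, output);
    (void)result; // failures are reported as NvResult codes, never thrown

    module.Close(fd); // removes the fd from the open-file table
}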
159
src/core/hle/service/nvdrv/nvdrv.h
Executable file
@@ -0,0 +1,159 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <memory>
#include <optional>
#include <unordered_map>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/service/nvdrv/nvdata.h"
#include "core/hle/service/nvdrv/syncpoint_manager.h"
#include "core/hle/service/service.h"

namespace Core {
class System;
}

namespace Service::NVFlinger {
class NVFlinger;
}

namespace Service::Nvidia {

class SyncpointManager;

namespace Devices {
class nvdevice;
}

/// Represents an Nvidia event
struct NvEvent {
    Kernel::EventPair event;
    Fence fence{};
};

struct EventInterface {
    // Mask representing currently busy events
    u64 events_mask{};
    // Each kernel event associated to an NV event
    std::array<NvEvent, MaxNvEvents> events;
    // The status of the current NVEvent
    std::array<EventState, MaxNvEvents> status{};
    // Tells if an NVEvent is registered or not
    std::array<bool, MaxNvEvents> registered{};
    // When an NVEvent is waiting on a GPU interrupt, this is the syncpoint
    // associated with it.
    std::array<u32, MaxNvEvents> assigned_syncpt{};
    // This is the value of the GPU interrupt for which the NVEvent is waiting.
    std::array<u32, MaxNvEvents> assigned_value{};
    // Constant to denote an unassigned syncpoint.
    static constexpr u32 unassigned_syncpt = 0xFFFFFFFF;
    std::optional<u32> GetFreeEvent() const {
        u64 mask = events_mask;
        for (u32 i = 0; i < MaxNvEvents; i++) {
            const bool is_free = (mask & 0x1) == 0;
            if (is_free) {
                if (status[i] == EventState::Registered || status[i] == EventState::Free) {
                    return {i};
                }
            }
            mask = mask >> 1;
        }
        return std::nullopt;
    }
    void SetEventStatus(const u32 event_id, EventState new_status) {
        EventState old_status = status[event_id];
        if (old_status == new_status) {
            return;
        }
        status[event_id] = new_status;
        if (new_status == EventState::Registered) {
            registered[event_id] = true;
        }
        if (new_status == EventState::Waiting || new_status == EventState::Busy) {
            events_mask |= (1ULL << event_id);
        }
    }
    void RegisterEvent(const u32 event_id) {
        registered[event_id] = true;
        if (status[event_id] == EventState::Free) {
            status[event_id] = EventState::Registered;
        }
    }
    void UnregisterEvent(const u32 event_id) {
        registered[event_id] = false;
        if (status[event_id] == EventState::Registered) {
            status[event_id] = EventState::Free;
        }
    }
    void LiberateEvent(const u32 event_id) {
        status[event_id] = registered[event_id] ? EventState::Registered : EventState::Free;
        events_mask &= ~(1ULL << event_id);
        assigned_syncpt[event_id] = unassigned_syncpt;
        assigned_value[event_id] = 0;
    }
};

class Module final {
public:
    explicit Module(Core::System& system_);
    ~Module();

    /// Returns a pointer to one of the available devices, identified by its name.
    template <typename T>
    std::shared_ptr<T> GetDevice(const std::string& name) {
        auto itr = devices.find(name);
        if (itr == devices.end())
            return nullptr;
        return std::static_pointer_cast<T>(itr->second);
    }

    NvResult VerifyFD(DeviceFD fd) const;

    /// Opens a device node and returns a file descriptor to it.
    DeviceFD Open(const std::string& device_name);

    /// Sends an ioctl command to the specified file descriptor.
    NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                    std::vector<u8>& output);

    NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                    const std::vector<u8>& inline_input, std::vector<u8>& output);

    NvResult Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                    std::vector<u8>& output, std::vector<u8>& inline_output);

    /// Closes a device file descriptor and returns operation success.
    NvResult Close(DeviceFD fd);

    void SignalSyncpt(const u32 syncpoint_id, const u32 value);

    std::shared_ptr<Kernel::ReadableEvent> GetEvent(u32 event_id) const;

    std::shared_ptr<Kernel::WritableEvent> GetEventWriteable(u32 event_id) const;

private:
    /// Manages syncpoints on the host
    SyncpointManager syncpoint_manager;

    /// Id to use for the next open file descriptor.
    DeviceFD next_fd = 1;

    /// Mapping of file descriptors to the devices they reference.
    std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>> open_files;

    /// Mapping of device node names to their implementation.
    std::unordered_map<std::string, std::shared_ptr<Devices::nvdevice>> devices;

    EventInterface events_interface;
};

/// Registers all NVDRV services with the specified service manager.
void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger,
                       Core::System& system);

} // namespace Service::Nvidia
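
EventInterface tracks event-slot availability with a 64-bit mask: bit i is set while slot i is Waiting or Busy, and GetFreeEvent walks the mask from bit 0 looking for a clear bit whose slot is also Free or Registered. A simplified sketch of that scan, eliding the per-slot status check (illustrative only, not part of this commit):

// Illustrative sketch only: the busy-bitmask convention EventInterface relies on.
// Bit i of busy_mask is set while event slot i is Waiting or Busy.
#include <cstdint>
#include <optional>

std::optional<std::uint32_t> FirstFreeSlot(std::uint64_t busy_mask, std::uint32_t slot_count) {
    for (std::uint32_t i = 0; i < slot_count; ++i) {
        if ((busy_mask & (1ULL << i)) == 0) {
            return i; // slot i is not Waiting/Busy, so it can be handed out
        }
    }
    return std::nullopt; // every slot is in use
}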
29
src/core/hle/service/nvdrv/nvmemp.cpp
Executable file
@@ -0,0 +1,29 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/service/nvdrv/nvmemp.h"

namespace Service::Nvidia {

NVMEMP::NVMEMP(Core::System& system_) : ServiceFramework{system_, "nvmemp"} {
    static const FunctionInfo functions[] = {
        {0, &NVMEMP::Open, "Open"},
        {1, &NVMEMP::GetAruid, "GetAruid"},
    };
    RegisterHandlers(functions);
}

NVMEMP::~NVMEMP() = default;

void NVMEMP::Open(Kernel::HLERequestContext& ctx) {
    UNIMPLEMENTED();
}

void NVMEMP::GetAruid(Kernel::HLERequestContext& ctx) {
    UNIMPLEMENTED();
}

} // namespace Service::Nvidia
25
src/core/hle/service/nvdrv/nvmemp.h
Executable file
@@ -0,0 +1,25 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/service/service.h"

namespace Core {
class System;
}

namespace Service::Nvidia {

class NVMEMP final : public ServiceFramework<NVMEMP> {
public:
    explicit NVMEMP(Core::System& system_);
    ~NVMEMP() override;

private:
    void Open(Kernel::HLERequestContext& ctx);
    void GetAruid(Kernel::HLERequestContext& ctx);
};

} // namespace Service::Nvidia
39
src/core/hle/service/nvdrv/syncpoint_manager.cpp
Executable file
@@ -0,0 +1,39 @@
// Copyright 2020 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "core/hle/service/nvdrv/syncpoint_manager.h"
#include "video_core/gpu.h"

namespace Service::Nvidia {

SyncpointManager::SyncpointManager(Tegra::GPU& gpu) : gpu{gpu} {}

SyncpointManager::~SyncpointManager() = default;

u32 SyncpointManager::RefreshSyncpoint(u32 syncpoint_id) {
    syncpoints[syncpoint_id].min = gpu.GetSyncpointValue(syncpoint_id);
    return GetSyncpointMin(syncpoint_id);
}

u32 SyncpointManager::AllocateSyncpoint() {
    for (u32 syncpoint_id = 1; syncpoint_id < MaxSyncPoints; syncpoint_id++) {
        if (!syncpoints[syncpoint_id].is_allocated) {
            syncpoints[syncpoint_id].is_allocated = true;
            return syncpoint_id;
        }
    }
    UNREACHABLE_MSG("No more available syncpoints!");
    return {};
}

u32 SyncpointManager::IncreaseSyncpoint(u32 syncpoint_id, u32 value) {
    for (u32 index = 0; index < value; ++index) {
        syncpoints[syncpoint_id].max.fetch_add(1, std::memory_order_relaxed);
    }

    return GetSyncpointMax(syncpoint_id);
}

} // namespace Service::Nvidia
85
src/core/hle/service/nvdrv/syncpoint_manager.h
Executable file
@@ -0,0 +1,85 @@
// Copyright 2020 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <atomic>

#include "common/common_types.h"
#include "core/hle/service/nvdrv/nvdata.h"

namespace Tegra {
class GPU;
}

namespace Service::Nvidia {

class SyncpointManager final {
public:
    explicit SyncpointManager(Tegra::GPU& gpu);
    ~SyncpointManager();

    /**
     * Returns true if the specified syncpoint is expired for the given value.
     * @param syncpoint_id Syncpoint ID to check.
     * @param value Value to check against the specified syncpoint.
     * @returns True if the specified syncpoint is expired for the given value, otherwise False.
     */
    bool IsSyncpointExpired(u32 syncpoint_id, u32 value) const {
        return (GetSyncpointMax(syncpoint_id) - value) >= (GetSyncpointMin(syncpoint_id) - value);
    }

    /**
     * Gets the lower bound for the specified syncpoint.
     * @param syncpoint_id Syncpoint ID to get the lower bound for.
     * @returns The lower bound for the specified syncpoint.
     */
    u32 GetSyncpointMin(u32 syncpoint_id) const {
        return syncpoints.at(syncpoint_id).min.load(std::memory_order_relaxed);
    }

    /**
     * Gets the upper bound for the specified syncpoint.
     * @param syncpoint_id Syncpoint ID to get the upper bound for.
     * @returns The upper bound for the specified syncpoint.
     */
    u32 GetSyncpointMax(u32 syncpoint_id) const {
        return syncpoints.at(syncpoint_id).max.load(std::memory_order_relaxed);
    }

    /**
     * Refreshes the minimum value for the specified syncpoint.
     * @param syncpoint_id Syncpoint ID to be refreshed.
     * @returns The new syncpoint minimum value.
     */
    u32 RefreshSyncpoint(u32 syncpoint_id);

    /**
     * Allocates a new syncpoint.
     * @returns The syncpoint ID for the newly allocated syncpoint.
     */
    u32 AllocateSyncpoint();

    /**
     * Increases the maximum value for the specified syncpoint.
     * @param syncpoint_id Syncpoint ID to be increased.
     * @param value Value to increase the specified syncpoint by.
     * @returns The new syncpoint maximum value.
     */
    u32 IncreaseSyncpoint(u32 syncpoint_id, u32 value);

private:
    struct Syncpoint {
        std::atomic<u32> min;
        std::atomic<u32> max;
        std::atomic<bool> is_allocated;
    };

    std::array<Syncpoint, MaxSyncPoints> syncpoints{};

    Tegra::GPU& gpu;
};

} // namespace Service::Nvidia
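
IsSyncpointExpired above compares unsigned differences rather than raw counter values, so the test stays correct after the 32-bit syncpoint counters wrap around. A standalone sketch of the same arithmetic with made-up values (illustrative only, not part of this commit):

// Illustrative sketch only: wrap-safe "has the counter reached `value` yet?" test,
// using the same unsigned-difference trick as IsSyncpointExpired above.
#include <cassert>
#include <cstdint>

bool Expired(std::uint32_t max, std::uint32_t min, std::uint32_t value) {
    // Both subtractions wrap modulo 2^32, so the comparison still orders the
    // counters sensibly even after max/min have overflowed past value.
    return (max - value) >= (min - value);
}

int main() {
    assert(Expired(100, 100, 90));      // the GPU already signalled past fence 90
    assert(!Expired(100, 80, 90));      // fence 90 not reached yet (min is only 80)
    assert(Expired(5, 5, 0xFFFFFFF0u)); // still correct after the counter wrapped
    return 0;
}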