early-access version 1255
63
src/core/hle/service/nvdrv/devices/nvdevice.h
Executable file
@@ -0,0 +1,63 @@
@@ -0,0 +1,63 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <vector>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/nvdata.h"
#include "core/hle/service/service.h"

namespace Core {
class System;
}

namespace Service::Nvidia::Devices {

/// Represents an abstract nvidia device node. It is to be subclassed by concrete device nodes to
/// implement the ioctl interface.
class nvdevice {
public:
    explicit nvdevice(Core::System& system) : system{system} {}
    virtual ~nvdevice() = default;

    /**
     * Handles an ioctl1 request.
     * @param command The ioctl command id.
     * @param input A buffer containing the input data for the ioctl.
     * @param output A buffer where the output data will be written.
     * @returns The result code of the ioctl.
     */
    virtual NvResult Ioctl1(Ioctl command, const std::vector<u8>& input,
                            std::vector<u8>& output) = 0;

    /**
     * Handles an ioctl2 request.
     * @param command The ioctl command id.
     * @param input A buffer containing the input data for the ioctl.
     * @param inline_input A buffer containing the inlined input data for the ioctl.
     * @param output A buffer where the output data will be written.
     * @returns The result code of the ioctl.
     */
    virtual NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
                            const std::vector<u8>& inline_input, std::vector<u8>& output) = 0;

    /**
     * Handles an ioctl3 request.
     * @param command The ioctl command id.
     * @param input A buffer containing the input data for the ioctl.
     * @param output A buffer where the output data will be written.
     * @param inline_output A buffer where the inlined output data will be written.
     * @returns The result code of the ioctl.
     */
    virtual NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                            std::vector<u8>& inline_output) = 0;

protected:
    Core::System& system;
};

} // namespace Service::Nvidia::Devices
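Note: every concrete device node added in this commit follows the same shape: it subclasses nvdevice, decodes command.group and command.cmd, and dispatches to a private handler. The following is a minimal sketch only, not part of this commit; the nvdummy name, its 'D' group byte, and DoSomething are hypothetical, and the sketch assumes the Ioctl/NvResult definitions from nvdata.h.

// Illustrative sketch of a concrete nvdevice subclass (hypothetical device).
class nvdummy final : public nvdevice {
public:
    explicit nvdummy(Core::System& system) : nvdevice(system) {}
    ~nvdummy() override = default;

    NvResult Ioctl1(Ioctl command, const std::vector<u8>& input,
                    std::vector<u8>& output) override {
        switch (command.group) {
        case 'D': // hypothetical ioctl group for this device
            switch (command.cmd) {
            case 0x1:
                return DoSomething(input, output);
            default:
                break;
            }
            break;
        default:
            break;
        }
        UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
        return NvResult::NotImplemented;
    }

    NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
                    const std::vector<u8>& inline_input, std::vector<u8>& output) override {
        return NvResult::NotImplemented;
    }

    NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                    std::vector<u8>& inline_output) override {
        return NvResult::NotImplemented;
    }

private:
    NvResult DoSomething(const std::vector<u8>& input, std::vector<u8>& output) {
        return NvResult::Success; // stand-in for a real handler
    }
};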
59
src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
Executable file
@@ -0,0 +1,59 @@
@@ -0,0 +1,59 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/perf_stats.h"
#include "video_core/gpu.h"
#include "video_core/renderer_base.h"

namespace Service::Nvidia::Devices {

nvdisp_disp0::nvdisp_disp0(Core::System& system, std::shared_ptr<nvmap> nvmap_dev)
    : nvdevice(system), nvmap_dev(std::move(nvmap_dev)) {}
nvdisp_disp0::~nvdisp_disp0() = default;

NvResult nvdisp_disp0::Ioctl1(Ioctl command, const std::vector<u8>& input,
                              std::vector<u8>& output) {
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvdisp_disp0::Ioctl2(Ioctl command, const std::vector<u8>& input,
                              const std::vector<u8>& inline_input, std::vector<u8>& output) {
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvdisp_disp0::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                              std::vector<u8>& inline_output) {
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height,
                        u32 stride, NVFlinger::BufferQueue::BufferTransformFlags transform,
                        const Common::Rectangle<int>& crop_rect) {
    VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle);
    LOG_TRACE(Service,
              "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}",
              addr, offset, width, height, stride, format);

    using PixelFormat = Tegra::FramebufferConfig::PixelFormat;
    const Tegra::FramebufferConfig framebuffer{
        addr, offset, width, height, stride, static_cast<PixelFormat>(format),
        transform, crop_rect};

    system.GetPerfStats().EndGameFrame();
    system.GetPerfStats().EndSystemFrame();
    system.GPU().SwapBuffers(&framebuffer);
    system.FrameLimiter().DoFrameLimiting(system.CoreTiming().GetGlobalTimeUs());
    system.GetPerfStats().BeginSystemFrame();
}

} // namespace Service::Nvidia::Devices
38
src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
Executable file
@@ -0,0 +1,38 @@
@@ -0,0 +1,38 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <vector>
#include "common/common_types.h"
#include "common/math_util.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
#include "core/hle/service/nvflinger/buffer_queue.h"

namespace Service::Nvidia::Devices {

class nvmap;

class nvdisp_disp0 final : public nvdevice {
public:
    explicit nvdisp_disp0(Core::System& system, std::shared_ptr<nvmap> nvmap_dev);
    ~nvdisp_disp0() override;

    NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
    NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
                    const std::vector<u8>& inline_input, std::vector<u8>& output) override;
    NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                    std::vector<u8>& inline_output) override;

    /// Performs a screen flip, drawing the buffer pointed to by the handle.
    void flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height, u32 stride,
              NVFlinger::BufferQueue::BufferTransformFlags transform,
              const Common::Rectangle<int>& crop_rect);

private:
    std::shared_ptr<nvmap> nvmap_dev;
};

} // namespace Service::Nvidia::Devices
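Note: every ioctl handler introduced from here on marshals its arguments the same way: a packed, little-endian parameter struct is copied out of the raw input byte vector with std::memcpy, updated, and copied back into the output vector. The free-standing sketch below (not part of this commit; IoctlExampleParams and HandleExample are hypothetical names) illustrates that pattern. It clamps the copy to the struct size for safety, whereas the handlers in this commit copy input.size()/output.size() bytes directly.

// Sketch of the ioctl parameter-marshalling pattern used by the device nodes below.
// Assumes u32_le, NvResult and the usual includes from this codebase are available.
struct IoctlExampleParams {
    u32_le value{};
    u32_le result{};
};
static_assert(sizeof(IoctlExampleParams) == 8, "IoctlExampleParams is incorrect size");

NvResult HandleExample(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlExampleParams params{};
    // The service framework hands the guest's ioctl buffer over as a byte vector.
    std::memcpy(&params, input.data(), std::min(input.size(), sizeof(params)));

    params.result = params.value + 1; // stand-in for the real handler logic

    // Copy the (possibly modified) parameters back so the guest sees the results.
    std::memcpy(output.data(), &params, std::min(output.size(), sizeof(params)));
    return NvResult::Success;
}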
349
src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
Executable file
@@ -0,0 +1,349 @@
@@ -0,0 +1,349 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <cstring>
#include <utility>

#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/memory.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_base.h"

namespace Service::Nvidia::Devices {

nvhost_as_gpu::nvhost_as_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev)
    : nvdevice(system), nvmap_dev(std::move(nvmap_dev)) {}
nvhost_as_gpu::~nvhost_as_gpu() = default;

NvResult nvhost_as_gpu::Ioctl1(Ioctl command, const std::vector<u8>& input,
                               std::vector<u8>& output) {
    switch (command.group) {
    case 'A':
        switch (command.cmd) {
        case 0x1:
            return BindChannel(input, output);
        case 0x2:
            return AllocateSpace(input, output);
        case 0x3:
            return FreeSpace(input, output);
        case 0x5:
            return UnmapBuffer(input, output);
        case 0x6:
            return MapBufferEx(input, output);
        case 0x8:
            return GetVARegions(input, output);
        case 0x9:
            return InitalizeEx(input, output);
        case 0x14:
            return Remap(input, output);
        default:
            break;
        }
        break;
    default:
        break;
    }

    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_as_gpu::Ioctl2(Ioctl command, const std::vector<u8>& input,
                               const std::vector<u8>& inline_input, std::vector<u8>& output) {
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_as_gpu::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                               std::vector<u8>& inline_output) {
    switch (command.group) {
    case 'A':
        switch (command.cmd) {
        case 0x8:
            return GetVARegions(input, output, inline_output);
        default:
            break;
        }
        break;
    default:
        break;
    }
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_as_gpu::InitalizeEx(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlInitalizeEx params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_WARNING(Service_NVDRV, "(STUBBED) called, big_page_size=0x{:X}", params.big_page_size);

    return NvResult::Success;
}

NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlAllocSpace params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages,
              params.page_size, params.flags);

    const auto size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
    if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) {
        params.offset = *system.GPU().MemoryManager().AllocateFixed(params.offset, size);
    } else {
        params.offset = system.GPU().MemoryManager().Allocate(size, params.align);
    }

    auto result = NvResult::Success;
    if (!params.offset) {
        LOG_CRITICAL(Service_NVDRV, "allocation failed for size {}", size);
        result = NvResult::InsufficientMemory;
    }

    std::memcpy(output.data(), &params, output.size());
    return result;
}

NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlFreeSpace params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset,
              params.pages, params.page_size);

    system.GPU().MemoryManager().Unmap(params.offset,
                                       static_cast<std::size_t>(params.pages) * params.page_size);

    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output) {
    const auto num_entries = input.size() / sizeof(IoctlRemapEntry);

    LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries);

    auto result = NvResult::Success;
    std::vector<IoctlRemapEntry> entries(num_entries);
    std::memcpy(entries.data(), input.data(), input.size());

    for (const auto& entry : entries) {
        LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}",
                  entry.offset, entry.nvmap_handle, entry.pages);

        const auto object{nvmap_dev->GetObject(entry.nvmap_handle)};
        if (!object) {
            LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle);
            result = NvResult::InvalidState;
            break;
        }

        const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10};
        const auto size{static_cast<u64>(entry.pages) << 0x10};
        const auto map_offset{static_cast<u64>(entry.map_offset) << 0x10};
        const auto addr{system.GPU().MemoryManager().Map(object->addr + map_offset, offset, size)};

        if (!addr) {
            LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!");
            result = NvResult::InvalidState;
            break;
        }
    }

    std::memcpy(output.data(), entries.data(), output.size());
    return result;
}

NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlMapBufferEx params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_DEBUG(Service_NVDRV,
              "called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}"
              ", offset={}",
              params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size,
              params.offset);

    const auto object{nvmap_dev->GetObject(params.nvmap_handle)};
    if (!object) {
        LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle);
        std::memcpy(output.data(), &params, output.size());
        return NvResult::InvalidState;
    }

    // The real nvservices doesn't make a distinction between handles and ids: an object can
    // only have one handle, and it will be the same as its id. Assert that this is the case
    // to prevent unexpected behavior.
    ASSERT(object->id == params.nvmap_handle);
    auto& gpu = system.GPU();

    u64 page_size{params.page_size};
    if (!page_size) {
        page_size = object->align;
    }

    if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) {
        if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) {
            const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)};
            const auto gpu_addr{static_cast<GPUVAddr>(params.offset + params.buffer_offset)};

            if (!gpu.MemoryManager().Map(cpu_addr, gpu_addr, params.mapping_size)) {
                LOG_CRITICAL(Service_NVDRV,
                             "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, "
                             "mapping_size = {}, offset={}",
                             params.flags, params.nvmap_handle, params.buffer_offset,
                             params.mapping_size, params.offset);

                std::memcpy(output.data(), &params, output.size());
                return NvResult::InvalidState;
            }

            std::memcpy(output.data(), &params, output.size());
            return NvResult::Success;
        } else {
            LOG_CRITICAL(Service_NVDRV, "address not mapped offset={}", params.offset);

            std::memcpy(output.data(), &params, output.size());
            return NvResult::InvalidState;
        }
    }

    // We can only map objects that have already been assigned a CPU address.
    ASSERT(object->status == nvmap::Object::Status::Allocated);

    const auto physical_address{object->addr + params.buffer_offset};
    u64 size{params.mapping_size};
    if (!size) {
        size = object->size;
    }

    const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None};
    if (is_alloc) {
        params.offset = gpu.MemoryManager().MapAllocate(physical_address, size, page_size);
    } else {
        params.offset = gpu.MemoryManager().Map(physical_address, params.offset, size);
    }

    auto result = NvResult::Success;
    if (!params.offset) {
        LOG_CRITICAL(Service_NVDRV, "failed to map size={}", size);
        result = NvResult::InvalidState;
    } else {
        AddBufferMap(params.offset, size, physical_address, is_alloc);
    }

    std::memcpy(output.data(), &params, output.size());
    return result;
}

NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlUnmapBuffer params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);

    if (const auto size{RemoveBufferMap(params.offset)}; size) {
        system.GPU().MemoryManager().Unmap(params.offset, *size);
    } else {
        LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset);
    }

    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlBindChannel params{};
    std::memcpy(&params, input.data(), input.size());
    LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}", params.fd);

    channel = params.fd;
    return NvResult::Success;
}

NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlGetVaRegions params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
                params.buf_size);

    params.buf_size = 0x30;
    params.regions[0].offset = 0x04000000;
    params.regions[0].page_size = 0x1000;
    params.regions[0].pages = 0x3fbfff;

    params.regions[1].offset = 0x04000000;
    params.regions[1].page_size = 0x10000;
    params.regions[1].pages = 0x1bffff;

    // TODO(ogniK): This can probably stay stubbed, but proper support should be added later on.

    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output,
                                     std::vector<u8>& inline_output) {
    IoctlGetVaRegions params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
                params.buf_size);

    params.buf_size = 0x30;
    params.regions[0].offset = 0x04000000;
    params.regions[0].page_size = 0x1000;
    params.regions[0].pages = 0x3fbfff;

    params.regions[1].offset = 0x04000000;
    params.regions[1].page_size = 0x10000;
    params.regions[1].pages = 0x1bffff;

    // TODO(ogniK): This can probably stay stubbed, but proper support should be added later on.

    std::memcpy(output.data(), &params, output.size());
    std::memcpy(inline_output.data(), &params.regions, inline_output.size());
    return NvResult::Success;
}

std::optional<nvhost_as_gpu::BufferMap> nvhost_as_gpu::FindBufferMap(GPUVAddr gpu_addr) const {
    const auto end{buffer_mappings.upper_bound(gpu_addr)};
    for (auto iter{buffer_mappings.begin()}; iter != end; ++iter) {
        if (gpu_addr >= iter->second.StartAddr() && gpu_addr < iter->second.EndAddr()) {
            return iter->second;
        }
    }

    return std::nullopt;
}

void nvhost_as_gpu::AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr,
                                 bool is_allocated) {
    buffer_mappings[gpu_addr] = {gpu_addr, size, cpu_addr, is_allocated};
}

std::optional<std::size_t> nvhost_as_gpu::RemoveBufferMap(GPUVAddr gpu_addr) {
    if (const auto iter{buffer_mappings.find(gpu_addr)}; iter != buffer_mappings.end()) {
        std::size_t size{};

        if (iter->second.IsAllocated()) {
            size = iter->second.Size();
        }

        buffer_mappings.erase(iter);

        return size;
    }

    return std::nullopt;
}

} // namespace Service::Nvidia::Devices
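Note: Remap (and the stubbed VA regions) work in units of 64 KiB big pages, which is why every offset and page count from the ioctl is shifted left by 0x10 before being handed to the GPU memory manager. A small worked example, with illustrative values that are not taken from any real title:

// Worked example of the 64 KiB (1 << 0x10) page units used by Remap above.
constexpr u32 example_offset_pages = 0x200; // as IoctlRemapEntry::offset
constexpr u32 example_page_count = 0x10;    // as IoctlRemapEntry::pages
constexpr u64 example_gpu_va = static_cast<u64>(example_offset_pages) << 0x10; // byte address
constexpr u64 example_size = static_cast<u64>(example_page_count) << 0x10;     // byte size
static_assert(example_gpu_va == 0x2000000, "0x200 pages start at GPU VA 0x2000000");
static_assert(example_size == 0x100000, "0x10 big pages cover 1 MiB");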
181
src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
Executable file
@@ -0,0 +1,181 @@
@@ -0,0 +1,181 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <map>
#include <memory>
#include <optional>
#include <vector>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"

namespace Service::Nvidia::Devices {

class nvmap;

enum class AddressSpaceFlags : u32 {
    None = 0x0,
    FixedOffset = 0x1,
    Remap = 0x100,
};
DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags);

class nvhost_as_gpu final : public nvdevice {
public:
    explicit nvhost_as_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev);
    ~nvhost_as_gpu() override;

    NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
    NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
                    const std::vector<u8>& inline_input, std::vector<u8>& output) override;
    NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                    std::vector<u8>& inline_output) override;

private:
    class BufferMap final {
    public:
        constexpr BufferMap() = default;

        constexpr BufferMap(GPUVAddr start_addr, std::size_t size)
            : start_addr{start_addr}, end_addr{start_addr + size} {}

        constexpr BufferMap(GPUVAddr start_addr, std::size_t size, VAddr cpu_addr,
                            bool is_allocated)
            : start_addr{start_addr}, end_addr{start_addr + size}, cpu_addr{cpu_addr},
              is_allocated{is_allocated} {}

        constexpr VAddr StartAddr() const {
            return start_addr;
        }

        constexpr VAddr EndAddr() const {
            return end_addr;
        }

        constexpr std::size_t Size() const {
            return end_addr - start_addr;
        }

        constexpr VAddr CpuAddr() const {
            return cpu_addr;
        }

        constexpr bool IsAllocated() const {
            return is_allocated;
        }

    private:
        GPUVAddr start_addr{};
        GPUVAddr end_addr{};
        VAddr cpu_addr{};
        bool is_allocated{};
    };

    struct IoctlInitalizeEx {
        u32_le big_page_size{}; // depends on GPU's available_big_page_sizes; 0=default
        s32_le as_fd{};         // ignored; passes 0
        u32_le flags{};         // passes 0
        u32_le reserved{};      // ignored; passes 0
        u64_le unk0{};
        u64_le unk1{};
        u64_le unk2{};
    };
    static_assert(sizeof(IoctlInitalizeEx) == 40, "IoctlInitalizeEx is incorrect size");

    struct IoctlAllocSpace {
        u32_le pages{};
        u32_le page_size{};
        AddressSpaceFlags flags{};
        INSERT_PADDING_WORDS(1);
        union {
            u64_le offset;
            u64_le align;
        };
    };
    static_assert(sizeof(IoctlAllocSpace) == 24, "IoctlAllocSpace is incorrect size");

    struct IoctlFreeSpace {
        u64_le offset{};
        u32_le pages{};
        u32_le page_size{};
    };
    static_assert(sizeof(IoctlFreeSpace) == 16, "IoctlFreeSpace is incorrect size");

    struct IoctlRemapEntry {
        u16_le flags{};
        u16_le kind{};
        u32_le nvmap_handle{};
        u32_le map_offset{};
        u32_le offset{};
        u32_le pages{};
    };
    static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size");

    struct IoctlMapBufferEx {
        AddressSpaceFlags flags{}; // bit0: fixed_offset, bit2: cacheable
        u32_le kind{};             // -1 is default
        u32_le nvmap_handle{};
        u32_le page_size{}; // 0 means don't care
        s64_le buffer_offset{};
        u64_le mapping_size{};
        s64_le offset{};
    };
    static_assert(sizeof(IoctlMapBufferEx) == 40, "IoctlMapBufferEx is incorrect size");

    struct IoctlUnmapBuffer {
        s64_le offset{};
    };
    static_assert(sizeof(IoctlUnmapBuffer) == 8, "IoctlUnmapBuffer is incorrect size");

    struct IoctlBindChannel {
        s32_le fd{};
    };
    static_assert(sizeof(IoctlBindChannel) == 4, "IoctlBindChannel is incorrect size");

    struct IoctlVaRegion {
        u64_le offset{};
        u32_le page_size{};
        INSERT_PADDING_WORDS(1);
        u64_le pages{};
    };
    static_assert(sizeof(IoctlVaRegion) == 24, "IoctlVaRegion is incorrect size");

    struct IoctlGetVaRegions {
        u64_le buf_addr{}; // (contained output user ptr on linux, ignored)
        u32_le buf_size{}; // forced to 2*sizeof(struct va_region)
        u32_le reserved{};
        IoctlVaRegion regions[2]{};
    };
    static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(IoctlVaRegion) * 2,
                  "IoctlGetVaRegions is incorrect size");

    s32 channel{};

    NvResult InitalizeEx(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult Remap(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult FreeSpace(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult BindChannel(const std::vector<u8>& input, std::vector<u8>& output);

    NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output,
                          std::vector<u8>& inline_output);

    std::optional<BufferMap> FindBufferMap(GPUVAddr gpu_addr) const;
    void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated);
    std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr);

    std::shared_ptr<nvmap> nvmap_dev;

    // This is expected to be ordered, therefore we must use a map, not unordered_map
    std::map<GPUVAddr, BufferMap> buffer_mappings;
};

} // namespace Service::Nvidia::Devices
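Note: the comment on buffer_mappings matters because FindBufferMap relies on the keys (range start addresses) being sorted: it only has to scan entries up to upper_bound(gpu_addr) and return the one whose [StartAddr, EndAddr) interval contains the query. A self-contained sketch of the same lookup on a plain std::map (not part of this commit; Range and FindContaining are illustrative names):

// Sketch of the ordered-interval lookup that FindBufferMap performs on buffer_mappings.
#include <cstdint>
#include <map>
#include <optional>

struct Range {
    std::uint64_t start;
    std::uint64_t end; // one past the last mapped byte
};

std::optional<Range> FindContaining(const std::map<std::uint64_t, Range>& ranges,
                                    std::uint64_t addr) {
    // Keys are range start addresses; only entries starting at or below addr can contain it,
    // which is why an ordered std::map (not unordered_map) is required here.
    for (auto it = ranges.begin(), end = ranges.upper_bound(addr); it != end; ++it) {
        if (addr >= it->second.start && addr < it->second.end) {
            return it->second;
        }
    }
    return std::nullopt;
}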
199
src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
Executable file
@@ -0,0 +1,199 @@
@@ -0,0 +1,199 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <cstdlib>
#include <cstring>

#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl.h"
#include "video_core/gpu.h"

namespace Service::Nvidia::Devices {

nvhost_ctrl::nvhost_ctrl(Core::System& system, EventInterface& events_interface,
                         SyncpointManager& syncpoint_manager)
    : nvdevice(system), events_interface{events_interface}, syncpoint_manager{syncpoint_manager} {}
nvhost_ctrl::~nvhost_ctrl() = default;

NvResult nvhost_ctrl::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) {
    switch (command.group) {
    case 0x0:
        switch (command.cmd) {
        case 0x1b:
            return NvOsGetConfigU32(input, output);
        case 0x1c:
            return IocCtrlClearEventWait(input, output);
        case 0x1d:
            return IocCtrlEventWait(input, output, false);
        case 0x1e:
            return IocCtrlEventWait(input, output, true);
        case 0x1f:
            return IocCtrlEventRegister(input, output);
        case 0x20:
            return IocCtrlEventUnregister(input, output);
        }
        break;
    default:
        break;
    }

    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_ctrl::Ioctl2(Ioctl command, const std::vector<u8>& input,
                             const std::vector<u8>& inline_input, std::vector<u8>& output) {
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_ctrl::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                             std::vector<u8>& inline_output) {
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output) {
    IocGetConfigParams params{};
    std::memcpy(&params, input.data(), sizeof(params));
    LOG_TRACE(Service_NVDRV, "called, setting={}!{}", params.domain_str.data(),
              params.param_str.data());
    return NvResult::ConfigVarNotFound; // Returns an error in production mode
}

NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
                                       bool is_async) {
    IocCtrlEventWaitParams params{};
    std::memcpy(&params, input.data(), sizeof(params));
    LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_async={}",
              params.syncpt_id, params.threshold, params.timeout, is_async);

    if (params.syncpt_id >= MaxSyncPoints) {
        return NvResult::BadParameter;
    }

    u32 event_id = params.value & 0x00FF;

    if (event_id >= MaxNvEvents) {
        std::memcpy(output.data(), &params, sizeof(params));
        return NvResult::BadParameter;
    }

    if (syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) {
        params.value = syncpoint_manager.GetSyncpointMin(params.syncpt_id);
        std::memcpy(output.data(), &params, sizeof(params));
        return NvResult::Success;
    }

    if (const auto new_value = syncpoint_manager.RefreshSyncpoint(params.syncpt_id);
        syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) {
        params.value = new_value;
        std::memcpy(output.data(), &params, sizeof(params));
        return NvResult::Success;
    }

    auto event = events_interface.events[event_id];
    auto& gpu = system.GPU();

    // This is mostly here to account for unimplemented features; a synced GPU is
    // always considered to be in sync.
    if (!gpu.IsAsync()) {
        event.event.writable->Signal();
        return NvResult::Success;
    }
    auto lock = gpu.LockSync();
    const u32 current_syncpoint_value = event.fence.value;
    const s32 diff = current_syncpoint_value - params.threshold;
    if (diff >= 0) {
        event.event.writable->Signal();
        params.value = current_syncpoint_value;
        std::memcpy(output.data(), &params, sizeof(params));
        return NvResult::Success;
    }
    const u32 target_value = current_syncpoint_value - diff;

    if (!is_async) {
        params.value = 0;
    }

    if (params.timeout == 0) {
        std::memcpy(output.data(), &params, sizeof(params));
        return NvResult::Timeout;
    }

    EventState status = events_interface.status[event_id];
    if (event_id < MaxNvEvents || status == EventState::Free || status == EventState::Registered) {
        events_interface.SetEventStatus(event_id, EventState::Waiting);
        events_interface.assigned_syncpt[event_id] = params.syncpt_id;
        events_interface.assigned_value[event_id] = target_value;
        if (is_async) {
            params.value = params.syncpt_id << 4;
        } else {
            params.value = ((params.syncpt_id & 0xfff) << 16) | 0x10000000;
        }
        params.value |= event_id;
        event.event.writable->Clear();
        gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value);
        std::memcpy(output.data(), &params, sizeof(params));
        return NvResult::Timeout;
    }
    std::memcpy(output.data(), &params, sizeof(params));
    return NvResult::BadParameter;
}

NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output) {
    IocCtrlEventRegisterParams params{};
    std::memcpy(&params, input.data(), sizeof(params));
    const u32 event_id = params.user_event_id & 0x00FF;
    LOG_DEBUG(Service_NVDRV, "called, user_event_id: {:X}", event_id);
    if (event_id >= MaxNvEvents) {
        return NvResult::BadParameter;
    }
    if (events_interface.registered[event_id]) {
        return NvResult::BadParameter;
    }
    events_interface.RegisterEvent(event_id);
    return NvResult::Success;
}

NvResult nvhost_ctrl::IocCtrlEventUnregister(const std::vector<u8>& input,
                                             std::vector<u8>& output) {
    IocCtrlEventUnregisterParams params{};
    std::memcpy(&params, input.data(), sizeof(params));
    const u32 event_id = params.user_event_id & 0x00FF;
    LOG_DEBUG(Service_NVDRV, "called, user_event_id: {:X}", event_id);
    if (event_id >= MaxNvEvents) {
        return NvResult::BadParameter;
    }
    if (!events_interface.registered[event_id]) {
        return NvResult::BadParameter;
    }
    events_interface.UnregisterEvent(event_id);
    return NvResult::Success;
}

NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output) {
    IocCtrlEventSignalParams params{};
    std::memcpy(&params, input.data(), sizeof(params));

    u32 event_id = params.event_id & 0x00FF;
    LOG_WARNING(Service_NVDRV, "cleared event wait on event_id: {:X}", event_id);

    if (event_id >= MaxNvEvents) {
        return NvResult::BadParameter;
    }
    if (events_interface.status[event_id] == EventState::Waiting) {
        events_interface.LiberateEvent(event_id);
    }

    syncpoint_manager.RefreshSyncpoint(events_interface.events[event_id].fence.id);

    return NvResult::Success;
}

} // namespace Service::Nvidia::Devices
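Note: IocCtrlEventWait compares the free-running syncpoint counter against the requested threshold with a signed 32-bit difference (`const s32 diff = current_syncpoint_value - params.threshold;`), which keeps the "already reached" test correct even after the u32 counter wraps around. A small sketch of that check (not part of this commit; SyncpointReached is an illustrative name, and u32/s32 come from common_types.h):

// Wrap-around-safe "has the syncpoint reached the threshold yet?" check.
constexpr bool SyncpointReached(u32 current, u32 threshold) {
    // Values less than half the u32 range "ahead" of the threshold count as reached.
    return static_cast<s32>(current - threshold) >= 0;
}
static_assert(SyncpointReached(0x00000005, 0xFFFFFFF0), "reached, counter wrapped past threshold");
static_assert(!SyncpointReached(0xFFFFFFF0, 0x00000005), "still waiting for the threshold");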
133
src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
Executable file
@@ -0,0 +1,133 @@
@@ -0,0 +1,133 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <vector>
#include "common/common_types.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
#include "core/hle/service/nvdrv/nvdrv.h"

namespace Service::Nvidia::Devices {

class nvhost_ctrl final : public nvdevice {
public:
    explicit nvhost_ctrl(Core::System& system, EventInterface& events_interface,
                         SyncpointManager& syncpoint_manager);
    ~nvhost_ctrl() override;

    NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
    NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
                    const std::vector<u8>& inline_input, std::vector<u8>& output) override;
    NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                    std::vector<u8>& inline_output) override;

private:
    struct IocSyncptReadParams {
        u32_le id{};
        u32_le value{};
    };
    static_assert(sizeof(IocSyncptReadParams) == 8, "IocSyncptReadParams is incorrect size");

    struct IocSyncptIncrParams {
        u32_le id{};
    };
    static_assert(sizeof(IocSyncptIncrParams) == 4, "IocSyncptIncrParams is incorrect size");

    struct IocSyncptWaitParams {
        u32_le id{};
        u32_le thresh{};
        s32_le timeout{};
    };
    static_assert(sizeof(IocSyncptWaitParams) == 12, "IocSyncptWaitParams is incorrect size");

    struct IocModuleMutexParams {
        u32_le id{};
        u32_le lock{}; // (0 = unlock and 1 = lock)
    };
    static_assert(sizeof(IocModuleMutexParams) == 8, "IocModuleMutexParams is incorrect size");

    struct IocModuleRegRDWRParams {
        u32_le id{};
        u32_le num_offsets{};
        u32_le block_size{};
        u32_le offsets{};
        u32_le values{};
        u32_le write{};
    };
    static_assert(sizeof(IocModuleRegRDWRParams) == 24, "IocModuleRegRDWRParams is incorrect size");

    struct IocSyncptWaitexParams {
        u32_le id{};
        u32_le thresh{};
        s32_le timeout{};
        u32_le value{};
    };
    static_assert(sizeof(IocSyncptWaitexParams) == 16, "IocSyncptWaitexParams is incorrect size");

    struct IocSyncptReadMaxParams {
        u32_le id{};
        u32_le value{};
    };
    static_assert(sizeof(IocSyncptReadMaxParams) == 8, "IocSyncptReadMaxParams is incorrect size");

    struct IocGetConfigParams {
        std::array<char, 0x41> domain_str{};
        std::array<char, 0x41> param_str{};
        std::array<char, 0x101> config_str{};
    };
    static_assert(sizeof(IocGetConfigParams) == 387, "IocGetConfigParams is incorrect size");

    struct IocCtrlEventSignalParams {
        u32_le event_id{};
    };
    static_assert(sizeof(IocCtrlEventSignalParams) == 4,
                  "IocCtrlEventSignalParams is incorrect size");

    struct IocCtrlEventWaitParams {
        u32_le syncpt_id{};
        u32_le threshold{};
        s32_le timeout{};
        u32_le value{};
    };
    static_assert(sizeof(IocCtrlEventWaitParams) == 16, "IocCtrlEventWaitParams is incorrect size");

    struct IocCtrlEventWaitAsyncParams {
        u32_le syncpt_id{};
        u32_le threshold{};
        u32_le timeout{};
        u32_le value{};
    };
    static_assert(sizeof(IocCtrlEventWaitAsyncParams) == 16,
                  "IocCtrlEventWaitAsyncParams is incorrect size");

    struct IocCtrlEventRegisterParams {
        u32_le user_event_id{};
    };
    static_assert(sizeof(IocCtrlEventRegisterParams) == 4,
                  "IocCtrlEventRegisterParams is incorrect size");

    struct IocCtrlEventUnregisterParams {
        u32_le user_event_id{};
    };
    static_assert(sizeof(IocCtrlEventUnregisterParams) == 4,
                  "IocCtrlEventUnregisterParams is incorrect size");

    struct IocCtrlEventKill {
        u64_le user_events{};
    };
    static_assert(sizeof(IocCtrlEventKill) == 8, "IocCtrlEventKill is incorrect size");

    NvResult NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, bool is_async);
    NvResult IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocCtrlEventUnregister(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output);

    EventInterface& events_interface;
    SyncpointManager& syncpoint_manager;
};

} // namespace Service::Nvidia::Devices
282
src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
Executable file
@@ -0,0 +1,282 @@
@@ -0,0 +1,282 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <cstring>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"

namespace Service::Nvidia::Devices {

nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system) : nvdevice(system) {}
nvhost_ctrl_gpu::~nvhost_ctrl_gpu() = default;

NvResult nvhost_ctrl_gpu::Ioctl1(Ioctl command, const std::vector<u8>& input,
                                 std::vector<u8>& output) {
    switch (command.group) {
    case 'G':
        switch (command.cmd) {
        case 0x1:
            return ZCullGetCtxSize(input, output);
        case 0x2:
            return ZCullGetInfo(input, output);
        case 0x3:
            return ZBCSetTable(input, output);
        case 0x4:
            return ZBCQueryTable(input, output);
        case 0x5:
            return GetCharacteristics(input, output);
        case 0x6:
            return GetTPCMasks(input, output);
        case 0x7:
            return FlushL2(input, output);
        case 0x14:
            return GetActiveSlotMask(input, output);
        case 0x1c:
            return GetGpuTime(input, output);
        default:
            break;
        }
        break;
    }
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_ctrl_gpu::Ioctl2(Ioctl command, const std::vector<u8>& input,
                                 const std::vector<u8>& inline_input, std::vector<u8>& output) {
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_ctrl_gpu::Ioctl3(Ioctl command, const std::vector<u8>& input,
                                 std::vector<u8>& output, std::vector<u8>& inline_output) {
    switch (command.group) {
    case 'G':
        switch (command.cmd) {
        case 0x5:
            return GetCharacteristics(input, output, inline_output);
        case 0x6:
            return GetTPCMasks(input, output, inline_output);
        default:
            break;
        }
        break;
    default:
        break;
    }
    UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
    return NvResult::NotImplemented;
}

NvResult nvhost_ctrl_gpu::GetCharacteristics(const std::vector<u8>& input,
                                             std::vector<u8>& output) {
    LOG_DEBUG(Service_NVDRV, "called");
    IoctlCharacteristics params{};
    std::memcpy(&params, input.data(), input.size());
    params.gc.arch = 0x120;
    params.gc.impl = 0xb;
    params.gc.rev = 0xa1;
    params.gc.num_gpc = 0x1;
    params.gc.l2_cache_size = 0x40000;
    params.gc.on_board_video_memory_size = 0x0;
    params.gc.num_tpc_per_gpc = 0x2;
    params.gc.bus_type = 0x20;
    params.gc.big_page_size = 0x20000;
    params.gc.compression_page_size = 0x20000;
    params.gc.pde_coverage_bit_count = 0x1B;
    params.gc.available_big_page_sizes = 0x30000;
    params.gc.gpc_mask = 0x1;
    params.gc.sm_arch_sm_version = 0x503;
    params.gc.sm_arch_spa_version = 0x503;
    params.gc.sm_arch_warp_count = 0x80;
    params.gc.gpu_va_bit_count = 0x28;
    params.gc.reserved = 0x0;
    params.gc.flags = 0x55;
    params.gc.twod_class = 0x902D;
    params.gc.threed_class = 0xB197;
    params.gc.compute_class = 0xB1C0;
    params.gc.gpfifo_class = 0xB06F;
    params.gc.inline_to_memory_class = 0xA140;
    params.gc.dma_copy_class = 0xB0B5;
    params.gc.max_fbps_count = 0x1;
    params.gc.fbp_en_mask = 0x0;
    params.gc.max_ltc_per_fbp = 0x2;
    params.gc.max_lts_per_ltc = 0x1;
    params.gc.max_tex_per_tpc = 0x0;
    params.gc.max_gpc_count = 0x1;
    params.gc.rop_l2_en_mask_0 = 0x21D70;
    params.gc.rop_l2_en_mask_1 = 0x0;
    params.gc.chipname = 0x6230326D67;
    params.gc.gr_compbit_store_base_hw = 0x0;
    params.gpu_characteristics_buf_size = 0xA0;
    params.gpu_characteristics_buf_addr = 0xdeadbeef; // Cannot be 0 (UNUSED)
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output,
                                             std::vector<u8>& inline_output) {
    LOG_DEBUG(Service_NVDRV, "called");
    IoctlCharacteristics params{};
    std::memcpy(&params, input.data(), input.size());
    params.gc.arch = 0x120;
    params.gc.impl = 0xb;
    params.gc.rev = 0xa1;
    params.gc.num_gpc = 0x1;
    params.gc.l2_cache_size = 0x40000;
    params.gc.on_board_video_memory_size = 0x0;
    params.gc.num_tpc_per_gpc = 0x2;
    params.gc.bus_type = 0x20;
    params.gc.big_page_size = 0x20000;
    params.gc.compression_page_size = 0x20000;
    params.gc.pde_coverage_bit_count = 0x1B;
    params.gc.available_big_page_sizes = 0x30000;
    params.gc.gpc_mask = 0x1;
    params.gc.sm_arch_sm_version = 0x503;
    params.gc.sm_arch_spa_version = 0x503;
    params.gc.sm_arch_warp_count = 0x80;
    params.gc.gpu_va_bit_count = 0x28;
    params.gc.reserved = 0x0;
    params.gc.flags = 0x55;
    params.gc.twod_class = 0x902D;
    params.gc.threed_class = 0xB197;
    params.gc.compute_class = 0xB1C0;
    params.gc.gpfifo_class = 0xB06F;
    params.gc.inline_to_memory_class = 0xA140;
    params.gc.dma_copy_class = 0xB0B5;
    params.gc.max_fbps_count = 0x1;
    params.gc.fbp_en_mask = 0x0;
    params.gc.max_ltc_per_fbp = 0x2;
    params.gc.max_lts_per_ltc = 0x1;
    params.gc.max_tex_per_tpc = 0x0;
    params.gc.max_gpc_count = 0x1;
    params.gc.rop_l2_en_mask_0 = 0x21D70;
    params.gc.rop_l2_en_mask_1 = 0x0;
    params.gc.chipname = 0x6230326D67;
    params.gc.gr_compbit_store_base_hw = 0x0;
    params.gpu_characteristics_buf_size = 0xA0;
    params.gpu_characteristics_buf_addr = 0xdeadbeef; // Cannot be 0 (UNUSED)

    std::memcpy(output.data(), &params, output.size());
    std::memcpy(inline_output.data(), &params.gc, inline_output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlGpuGetTpcMasksArgs params{};
    std::memcpy(&params, input.data(), input.size());
    LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size);
    if (params.mask_buffer_size != 0) {
        params.tcp_mask = 3;
    }
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output,
                                      std::vector<u8>& inline_output) {
    IoctlGpuGetTpcMasksArgs params{};
    std::memcpy(&params, input.data(), input.size());
    LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size);
    if (params.mask_buffer_size != 0) {
        params.tcp_mask = 3;
    }
    std::memcpy(output.data(), &params, output.size());
    std::memcpy(inline_output.data(), &params.tcp_mask, inline_output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::GetActiveSlotMask(const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_DEBUG(Service_NVDRV, "called");

    IoctlActiveSlotMask params{};
    if (input.size() > 0) {
        std::memcpy(&params, input.data(), input.size());
    }
    params.slot = 0x07;
    params.mask = 0x01;
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::ZCullGetCtxSize(const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_DEBUG(Service_NVDRV, "called");

    IoctlZcullGetCtxSize params{};
    if (input.size() > 0) {
        std::memcpy(&params, input.data(), input.size());
    }
    params.size = 0x1;
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::ZCullGetInfo(const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_DEBUG(Service_NVDRV, "called");

    IoctlNvgpuGpuZcullGetInfoArgs params{};

    if (input.size() > 0) {
        std::memcpy(&params, input.data(), input.size());
    }

    params.width_align_pixels = 0x20;
    params.height_align_pixels = 0x20;
    params.pixel_squares_by_aliquots = 0x400;
    params.aliquot_total = 0x800;
    params.region_byte_multiplier = 0x20;
    params.region_header_size = 0x20;
    params.subregion_header_size = 0xc0;
    params.subregion_width_align_pixels = 0x20;
    params.subregion_height_align_pixels = 0x40;
    params.subregion_count = 0x10;
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::ZBCSetTable(const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_WARNING(Service_NVDRV, "(STUBBED) called");

    IoctlZbcSetTable params{};
    std::memcpy(&params, input.data(), input.size());
    // TODO(ogniK): What does this even actually do?
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::ZBCQueryTable(const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_WARNING(Service_NVDRV, "(STUBBED) called");

    IoctlZbcQueryTable params{};
    std::memcpy(&params, input.data(), input.size());
    // TODO: Implement this properly
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::FlushL2(const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_WARNING(Service_NVDRV, "(STUBBED) called");

    IoctlFlushL2 params{};
    std::memcpy(&params, input.data(), input.size());
    // TODO: Implement this properly
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_ctrl_gpu::GetGpuTime(const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_DEBUG(Service_NVDRV, "called");

    IoctlGetGpuTime params{};
    std::memcpy(&params, input.data(), input.size());
    params.gpu_time = static_cast<u64_le>(system.CoreTiming().GetGlobalTimeNs().count());
    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

} // namespace Service::Nvidia::Devices
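Note: the GPU characteristics reported above are the fixed values of the Switch's GM20B chip; the chipname constant is simply the chip name packed as little-endian ASCII. A self-contained sketch of the decoding (not part of this commit; ChipName is an illustrative helper and assumes a little-endian host, as the u64_le field implies):

// Worked example: 0x6230326D67 stored little-endian yields the bytes
// 0x67 0x6D 0x32 0x30 0x62, i.e. the string "gm20b".
#include <cstring>
#include <string>

std::string ChipName() {
    const unsigned long long chipname = 0x6230326D67ULL;
    char buf[sizeof(chipname) + 1]{};
    std::memcpy(buf, &chipname, sizeof(chipname)); // little-endian byte order assumed
    return std::string(buf); // "gm20b"
}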
160
src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
Executable file
@@ -0,0 +1,160 @@
@@ -0,0 +1,160 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <vector>
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"

namespace Service::Nvidia::Devices {

class nvhost_ctrl_gpu final : public nvdevice {
public:
    explicit nvhost_ctrl_gpu(Core::System& system);
    ~nvhost_ctrl_gpu() override;

    NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
    NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
                    const std::vector<u8>& inline_input, std::vector<u8>& output) override;
    NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                    std::vector<u8>& inline_output) override;

private:
    struct IoctlGpuCharacteristics {
        u32_le arch;                       // 0x120 (NVGPU_GPU_ARCH_GM200)
        u32_le impl;                       // 0xB (NVGPU_GPU_IMPL_GM20B)
        u32_le rev;                        // 0xA1 (Revision A1)
        u32_le num_gpc;                    // 0x1
        u64_le l2_cache_size;              // 0x40000
        u64_le on_board_video_memory_size; // 0x0 (not used)
        u32_le num_tpc_per_gpc;            // 0x2
        u32_le bus_type;                   // 0x20 (NVGPU_GPU_BUS_TYPE_AXI)
        u32_le big_page_size;              // 0x20000
        u32_le compression_page_size;      // 0x20000
        u32_le pde_coverage_bit_count;     // 0x1B
        u32_le available_big_page_sizes;   // 0x30000
        u32_le gpc_mask;                   // 0x1
        u32_le sm_arch_sm_version;         // 0x503 (Maxwell Generation 5.0.3?)
        u32_le sm_arch_spa_version;        // 0x503 (Maxwell Generation 5.0.3?)
        u32_le sm_arch_warp_count;         // 0x80
        u32_le gpu_va_bit_count;           // 0x28
        u32_le reserved;                   // NULL
        u64_le flags;                      // 0x55
        u32_le twod_class;                 // 0x902D (FERMI_TWOD_A)
        u32_le threed_class;               // 0xB197 (MAXWELL_B)
        u32_le compute_class;              // 0xB1C0 (MAXWELL_COMPUTE_B)
        u32_le gpfifo_class;               // 0xB06F (MAXWELL_CHANNEL_GPFIFO_A)
        u32_le inline_to_memory_class;     // 0xA140 (KEPLER_INLINE_TO_MEMORY_B)
        u32_le dma_copy_class;             // 0xB0B5 (MAXWELL_DMA_COPY_A)
        u32_le max_fbps_count;             // 0x1
        u32_le fbp_en_mask;                // 0x0 (disabled)
        u32_le max_ltc_per_fbp;            // 0x2
        u32_le max_lts_per_ltc;            // 0x1
        u32_le max_tex_per_tpc;            // 0x0 (not supported)
        u32_le max_gpc_count;              // 0x1
        u32_le rop_l2_en_mask_0;           // 0x21D70 (fuse_status_opt_rop_l2_fbp_r)
        u32_le rop_l2_en_mask_1;           // 0x0
        u64_le chipname;                   // 0x6230326D67 ("gm20b")
        u64_le gr_compbit_store_base_hw;   // 0x0 (not supported)
    };
    static_assert(sizeof(IoctlGpuCharacteristics) == 160,
                  "IoctlGpuCharacteristics is incorrect size");

    struct IoctlCharacteristics {
        u64_le gpu_characteristics_buf_size; // must not be NULL, but gets overwritten with
                                             // 0xA0=max_size
        u64_le gpu_characteristics_buf_addr; // ignored, but must not be NULL
        IoctlGpuCharacteristics gc;
    };
    static_assert(sizeof(IoctlCharacteristics) == 16 + sizeof(IoctlGpuCharacteristics),
                  "IoctlCharacteristics is incorrect size");

    struct IoctlGpuGetTpcMasksArgs {
        u32_le mask_buffer_size{};
        INSERT_PADDING_WORDS(1);
        u64_le mask_buffer_address{};
        u32_le tcp_mask{};
        INSERT_PADDING_WORDS(1);
    };
    static_assert(sizeof(IoctlGpuGetTpcMasksArgs) == 24,
                  "IoctlGpuGetTpcMasksArgs is incorrect size");

    struct IoctlActiveSlotMask {
        u32_le slot; // always 0x07
        u32_le mask;
    };
    static_assert(sizeof(IoctlActiveSlotMask) == 8, "IoctlActiveSlotMask is incorrect size");

    struct IoctlZcullGetCtxSize {
        u32_le size;
    };
    static_assert(sizeof(IoctlZcullGetCtxSize) == 4, "IoctlZcullGetCtxSize is incorrect size");

    struct IoctlNvgpuGpuZcullGetInfoArgs {
        u32_le width_align_pixels;
        u32_le height_align_pixels;
        u32_le pixel_squares_by_aliquots;
        u32_le aliquot_total;
        u32_le region_byte_multiplier;
        u32_le region_header_size;
        u32_le subregion_header_size;
        u32_le subregion_width_align_pixels;
        u32_le subregion_height_align_pixels;
        u32_le subregion_count;
    };
    static_assert(sizeof(IoctlNvgpuGpuZcullGetInfoArgs) == 40,
                  "IoctlNvgpuGpuZcullGetInfoArgs is incorrect size");

    struct IoctlZbcSetTable {
        u32_le color_ds[4];
        u32_le color_l2[4];
        u32_le depth;
        u32_le format;
        u32_le type;
    };
    static_assert(sizeof(IoctlZbcSetTable) == 44, "IoctlZbcSetTable is incorrect size");

    struct IoctlZbcQueryTable {
        u32_le color_ds[4];
        u32_le color_l2[4];
        u32_le depth;
        u32_le ref_cnt;
        u32_le format;
        u32_le type;
        u32_le index_size;
    };
    static_assert(sizeof(IoctlZbcQueryTable) == 52, "IoctlZbcQueryTable is incorrect size");

    struct IoctlFlushL2 {
        u32_le flush; // l2_flush | l2_invalidate << 1 | fb_flush << 2
        u32_le reserved;
    };
    static_assert(sizeof(IoctlFlushL2) == 8, "IoctlFlushL2 is incorrect size");

    struct IoctlGetGpuTime {
        u64_le gpu_time{};
        INSERT_PADDING_WORDS(2);
    };
    static_assert(sizeof(IoctlGetGpuTime) == 0x10, "IoctlGetGpuTime is incorrect size");

    NvResult GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output,
                                std::vector<u8>& inline_output);

    NvResult GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output,
                         std::vector<u8>& inline_output);

    NvResult GetActiveSlotMask(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult ZCullGetCtxSize(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult ZCullGetInfo(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult ZBCSetTable(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult ZBCQueryTable(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult FlushL2(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult GetGpuTime(const std::vector<u8>& input, std::vector<u8>& output);
};

} // namespace Service::Nvidia::Devices
321
src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
Executable file
321
src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
Executable file
@@ -0,0 +1,321 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <cstring>
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
|
||||
#include "core/hle/service/nvdrv/syncpoint_manager.h"
|
||||
#include "core/memory.h"
|
||||
#include "video_core/gpu.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
nvhost_gpu::nvhost_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev,
|
||||
SyncpointManager& syncpoint_manager)
|
||||
: nvdevice(system), nvmap_dev(std::move(nvmap_dev)), syncpoint_manager{syncpoint_manager} {
|
||||
channel_fence.id = syncpoint_manager.AllocateSyncpoint();
|
||||
channel_fence.value = system.GPU().GetSyncpointValue(channel_fence.id);
|
||||
}
|
||||
|
||||
nvhost_gpu::~nvhost_gpu() = default;
|
||||
|
||||
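// Note: command.group appears to mirror the ioctl "magic" byte of the Linux nvhost/nvgpu
// drivers (0x00 for generic nvhost commands, 'H' for channel ioctls, 'G' for client-data
// ioctls), while command.cmd selects the command within that group.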
NvResult nvhost_gpu::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
switch (command.group) {
|
||||
case 0x0:
|
||||
switch (command.cmd) {
|
||||
case 0x3:
|
||||
return GetWaitbase(input, output);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 'H':
|
||||
switch (command.cmd) {
|
||||
case 0x1:
|
||||
return SetNVMAPfd(input, output);
|
||||
case 0x3:
|
||||
return ChannelSetTimeout(input, output);
|
||||
case 0x8:
|
||||
return SubmitGPFIFOBase(input, output, false);
|
||||
case 0x9:
|
||||
return AllocateObjectContext(input, output);
|
||||
case 0xb:
|
||||
return ZCullBind(input, output);
|
||||
case 0xc:
|
||||
return SetErrorNotifier(input, output);
|
||||
case 0xd:
|
||||
return SetChannelPriority(input, output);
|
||||
case 0x1a:
|
||||
return AllocGPFIFOEx2(input, output);
|
||||
case 0x1b:
|
||||
return SubmitGPFIFOBase(input, output, true);
|
||||
case 0x1d:
|
||||
return ChannelSetTimeslice(input, output);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 'G':
|
||||
switch (command.cmd) {
|
||||
case 0x14:
|
||||
return SetClientData(input, output);
|
||||
case 0x15:
|
||||
return GetClientData(input, output);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) {
|
||||
switch (command.group) {
|
||||
case 'H':
|
||||
switch (command.cmd) {
|
||||
case 0x1b:
|
||||
return SubmitGPFIFOBase(input, inline_input, output);
|
||||
}
|
||||
break;
|
||||
}
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlSetNvmapFD params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
|
||||
|
||||
nvmap_fd = params.nvmap_fd;
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::SetClientData(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
LOG_DEBUG(Service_NVDRV, "called");
|
||||
|
||||
IoctlClientData params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
user_data = params.data;
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::GetClientData(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
LOG_DEBUG(Service_NVDRV, "called");
|
||||
|
||||
IoctlClientData params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
params.data = user_data;
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::ZCullBind(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
std::memcpy(&zcull_params, input.data(), input.size());
|
||||
LOG_DEBUG(Service_NVDRV, "called, gpu_va={:X}, mode={:X}", zcull_params.gpu_va,
|
||||
zcull_params.mode);
|
||||
|
||||
std::memcpy(output.data(), &zcull_params, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::SetErrorNotifier(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlSetErrorNotifier params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called, offset={:X}, size={:X}, mem={:X}", params.offset,
|
||||
params.size, params.mem);
|
||||
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::SetChannelPriority(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
std::memcpy(&channel_priority, input.data(), input.size());
|
||||
LOG_DEBUG(Service_NVDRV, "(STUBBED) called, priority={:X}", channel_priority);
|
||||
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlAllocGpfifoEx2 params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
LOG_WARNING(Service_NVDRV,
|
||||
"(STUBBED) called, num_entries={:X}, flags={:X}, unk0={:X}, "
|
||||
"unk1={:X}, unk2={:X}, unk3={:X}",
|
||||
params.num_entries, params.flags, params.unk0, params.unk1, params.unk2,
|
||||
params.unk3);
|
||||
|
||||
channel_fence.value = system.GPU().GetSyncpointValue(channel_fence.id);
|
||||
|
||||
params.fence_out = channel_fence;
|
||||
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::AllocateObjectContext(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlAllocObjCtx params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called, class_num={:X}, flags={:X}", params.class_num,
|
||||
params.flags);
|
||||
|
||||
params.obj_id = 0x0;
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
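// Builds a pushbuffer that makes the channel wait on the given fence: the expected value is
// written through FenceValue, then a FenceAction Acquire is issued on the fence's syncpoint id.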
static std::vector<Tegra::CommandHeader> BuildWaitCommandList(Fence fence) {
|
||||
return {
|
||||
Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1,
|
||||
Tegra::SubmissionMode::Increasing),
|
||||
{fence.value},
|
||||
Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1,
|
||||
Tegra::SubmissionMode::Increasing),
|
||||
Tegra::GPU::FenceAction::Build(Tegra::GPU::FenceOperation::Acquire, fence.id),
|
||||
};
|
||||
}
|
||||
|
||||
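// Builds a pushbuffer that increments the fence's syncpoint add_increment times; the leading
// FenceValue header is zero-initialized before the FenceAction Increment entries are appended.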
static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(Fence fence, u32 add_increment) {
|
||||
std::vector<Tegra::CommandHeader> result{
|
||||
Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1,
|
||||
Tegra::SubmissionMode::Increasing),
|
||||
{}};
|
||||
|
||||
for (u32 count = 0; count < add_increment; ++count) {
|
||||
result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1,
|
||||
Tegra::SubmissionMode::Increasing));
|
||||
result.emplace_back(
|
||||
Tegra::GPU::FenceAction::Build(Tegra::GPU::FenceOperation::Increment, fence.id));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(Fence fence,
|
||||
u32 add_increment) {
|
||||
std::vector<Tegra::CommandHeader> result{
|
||||
Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForInterrupt, 1,
|
||||
Tegra::SubmissionMode::Increasing),
|
||||
{}};
|
||||
const std::vector<Tegra::CommandHeader> increment{
|
||||
BuildIncrementCommandList(fence, add_increment)};
|
||||
|
||||
result.insert(result.end(), increment.begin(), increment.end());
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
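// Common GPFIFO submission path: optionally wait on the previous fence, reserve the syncpoint
// increments for this submission, push the caller's entries, then queue the increment commands
// (with or without a preceding wait-for-interrupt, depending on suppress_wfi).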
NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector<u8>& output,
|
||||
Tegra::CommandList&& entries) {
|
||||
LOG_TRACE(Service_NVDRV, "called, gpfifo={:X}, num_entries={:X}, flags={:X}", params.address,
|
||||
params.num_entries, params.flags.raw);
|
||||
|
||||
auto& gpu = system.GPU();
|
||||
|
||||
params.fence_out.id = channel_fence.id;
|
||||
|
||||
if (params.flags.add_wait.Value() &&
|
||||
!syncpoint_manager.IsSyncpointExpired(params.fence_out.id, params.fence_out.value)) {
|
||||
gpu.PushGPUEntries(Tegra::CommandList{BuildWaitCommandList(params.fence_out)});
|
||||
}
|
||||
|
||||
if (params.flags.add_increment.Value() || params.flags.increment.Value()) {
|
||||
const u32 increment_value = params.flags.increment.Value() ? params.fence_out.value : 0;
|
||||
params.fence_out.value = syncpoint_manager.IncreaseSyncpoint(
|
||||
params.fence_out.id, params.AddIncrementValue() + increment_value);
|
||||
} else {
|
||||
params.fence_out.value = syncpoint_manager.GetSyncpointMax(params.fence_out.id);
|
||||
}
|
||||
|
||||
gpu.PushGPUEntries(std::move(entries));
|
||||
|
||||
if (params.flags.add_increment.Value()) {
|
||||
if (params.flags.suppress_wfi) {
|
||||
gpu.PushGPUEntries(Tegra::CommandList{
|
||||
BuildIncrementCommandList(params.fence_out, params.AddIncrementValue())});
|
||||
} else {
|
||||
gpu.PushGPUEntries(Tegra::CommandList{
|
||||
BuildIncrementWithWfiCommandList(params.fence_out, params.AddIncrementValue())});
|
||||
}
|
||||
}
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(IoctlSubmitGpfifo));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::SubmitGPFIFOBase(const std::vector<u8>& input, std::vector<u8>& output,
|
||||
bool kickoff) {
|
||||
if (input.size() < sizeof(IoctlSubmitGpfifo)) {
|
||||
UNIMPLEMENTED();
|
||||
return NvResult::InvalidSize;
|
||||
}
|
||||
IoctlSubmitGpfifo params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlSubmitGpfifo));
|
||||
Tegra::CommandList entries(params.num_entries);
|
||||
|
||||
if (kickoff) {
|
||||
system.Memory().ReadBlock(params.address, entries.command_lists.data(),
|
||||
params.num_entries * sizeof(Tegra::CommandListHeader));
|
||||
} else {
|
||||
std::memcpy(entries.command_lists.data(), &input[sizeof(IoctlSubmitGpfifo)],
|
||||
params.num_entries * sizeof(Tegra::CommandListHeader));
|
||||
}
|
||||
|
||||
return SubmitGPFIFOImpl(params, output, std::move(entries));
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::SubmitGPFIFOBase(const std::vector<u8>& input,
|
||||
const std::vector<u8>& input_inline,
|
||||
std::vector<u8>& output) {
|
||||
if (input.size() < sizeof(IoctlSubmitGpfifo)) {
|
||||
UNIMPLEMENTED();
|
||||
return NvResult::InvalidSize;
|
||||
}
|
||||
IoctlSubmitGpfifo params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlSubmitGpfifo));
|
||||
Tegra::CommandList entries(params.num_entries);
|
||||
std::memcpy(entries.command_lists.data(), input_inline.data(), input_inline.size());
|
||||
return SubmitGPFIFOImpl(params, output, std::move(entries));
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlGetWaitbase params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlGetWaitbase));
|
||||
LOG_INFO(Service_NVDRV, "called, unknown=0x{:X}", params.unknown);
|
||||
|
||||
params.value = 0; // Seems to be hard coded at 0
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlChannelSetTimeout params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlChannelSetTimeout));
|
||||
LOG_INFO(Service_NVDRV, "called, timeout=0x{:X}", params.timeout);
|
||||
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_gpu::ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlSetTimeslice params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlSetTimeslice));
|
||||
LOG_INFO(Service_NVDRV, "called, timeslice=0x{:X}", params.timeslice);
|
||||
|
||||
channel_timeslice = params.timeslice;
|
||||
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
195
src/core/hle/service/nvdrv/devices/nvhost_gpu.h
Executable file
195
src/core/hle/service/nvdrv/devices/nvhost_gpu.h
Executable file
@@ -0,0 +1,195 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/swap.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvdevice.h"
|
||||
#include "core/hle/service/nvdrv/nvdata.h"
|
||||
#include "video_core/dma_pusher.h"
|
||||
|
||||
namespace Service::Nvidia {
|
||||
class SyncpointManager;
|
||||
}
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
class nvmap;
|
||||
class nvhost_gpu final : public nvdevice {
|
||||
public:
|
||||
explicit nvhost_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev,
|
||||
SyncpointManager& syncpoint_manager);
|
||||
~nvhost_gpu() override;
|
||||
|
||||
NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
|
||||
NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) override;
|
||||
NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) override;
|
||||
|
||||
private:
|
||||
enum class CtxObjects : u32_le {
|
||||
Ctx2D = 0x902D,
|
||||
Ctx3D = 0xB197,
|
||||
CtxCompute = 0xB1C0,
|
||||
CtxKepler = 0xA140,
|
||||
CtxDMA = 0xB0B5,
|
||||
CtxChannelGPFIFO = 0xB06F,
|
||||
};
|
||||
|
||||
struct IoctlSetNvmapFD {
|
||||
s32_le nvmap_fd{};
|
||||
};
|
||||
static_assert(sizeof(IoctlSetNvmapFD) == 4, "IoctlSetNvmapFD is incorrect size");
|
||||
|
||||
struct IoctlChannelSetTimeout {
|
||||
u32_le timeout{};
|
||||
};
|
||||
static_assert(sizeof(IoctlChannelSetTimeout) == 4, "IoctlChannelSetTimeout is incorrect size");
|
||||
|
||||
struct IoctlAllocGPFIFO {
|
||||
u32_le num_entries{};
|
||||
u32_le flags{};
|
||||
};
|
||||
static_assert(sizeof(IoctlAllocGPFIFO) == 8, "IoctlAllocGPFIFO is incorrect size");
|
||||
|
||||
struct IoctlClientData {
|
||||
u64_le data{};
|
||||
};
|
||||
static_assert(sizeof(IoctlClientData) == 8, "IoctlClientData is incorrect size");
|
||||
|
||||
struct IoctlZCullBind {
|
||||
u64_le gpu_va{};
|
||||
u32_le mode{}; // 0=global, 1=no_ctxsw, 2=separate_buffer, 3=part_of_regular_buf
|
||||
INSERT_PADDING_WORDS(1);
|
||||
};
|
||||
static_assert(sizeof(IoctlZCullBind) == 16, "IoctlZCullBind is incorrect size");
|
||||
|
||||
struct IoctlSetErrorNotifier {
|
||||
u64_le offset{};
|
||||
u64_le size{};
|
||||
u32_le mem{}; // nvmap object handle
|
||||
INSERT_PADDING_WORDS(1);
|
||||
};
|
||||
static_assert(sizeof(IoctlSetErrorNotifier) == 24, "IoctlSetErrorNotifier is incorrect size");
|
||||
|
||||
struct IoctlChannelSetPriority {
|
||||
u32_le priority{};
|
||||
};
|
||||
static_assert(sizeof(IoctlChannelSetPriority) == 4,
|
||||
"IoctlChannelSetPriority is incorrect size");
|
||||
|
||||
struct IoctlSetTimeslice {
|
||||
u32_le timeslice{};
|
||||
};
|
||||
static_assert(sizeof(IoctlSetTimeslice) == 4, "IoctlSetTimeslice is incorrect size");
|
||||
|
||||
struct IoctlEventIdControl {
|
||||
u32_le cmd{}; // 0=disable, 1=enable, 2=clear
|
||||
u32_le id{};
|
||||
};
|
||||
static_assert(sizeof(IoctlEventIdControl) == 8, "IoctlEventIdControl is incorrect size");
|
||||
|
||||
struct IoctlGetErrorNotification {
|
||||
u64_le timestamp{};
|
||||
u32_le info32{};
|
||||
u16_le info16{};
|
||||
u16_le status{}; // always 0xFFFF
|
||||
};
|
||||
static_assert(sizeof(IoctlGetErrorNotification) == 16,
|
||||
"IoctlGetErrorNotification is incorrect size");
|
||||
|
||||
static_assert(sizeof(Fence) == 8, "Fence is incorrect size");
|
||||
|
||||
struct IoctlAllocGpfifoEx {
|
||||
u32_le num_entries{};
|
||||
u32_le flags{};
|
||||
u32_le unk0{};
|
||||
u32_le unk1{};
|
||||
u32_le unk2{};
|
||||
u32_le unk3{};
|
||||
u32_le unk4{};
|
||||
u32_le unk5{};
|
||||
};
|
||||
static_assert(sizeof(IoctlAllocGpfifoEx) == 32, "IoctlAllocGpfifoEx is incorrect size");
|
||||
|
||||
struct IoctlAllocGpfifoEx2 {
|
||||
u32_le num_entries{}; // in
|
||||
u32_le flags{}; // in
|
||||
u32_le unk0{}; // in (1 works)
|
||||
Fence fence_out{}; // out
|
||||
u32_le unk1{}; // in
|
||||
u32_le unk2{}; // in
|
||||
u32_le unk3{}; // in
|
||||
};
|
||||
static_assert(sizeof(IoctlAllocGpfifoEx2) == 32, "IoctlAllocGpfifoEx2 is incorrect size");
|
||||
|
||||
struct IoctlAllocObjCtx {
|
||||
u32_le class_num{}; // 0x902D=2d, 0xB197=3d, 0xB1C0=compute, 0xA140=kepler, 0xB0B5=DMA,
|
||||
// 0xB06F=channel_gpfifo
|
||||
u32_le flags{};
|
||||
u64_le obj_id{}; // (ignored) used for FREE_OBJ_CTX ioctl, which is not supported
|
||||
};
|
||||
static_assert(sizeof(IoctlAllocObjCtx) == 16, "IoctlAllocObjCtx is incorrect size");
|
||||
|
||||
struct IoctlSubmitGpfifo {
|
||||
u64_le address{}; // pointer to gpfifo entry structs
|
||||
u32_le num_entries{}; // number of fence objects being submitted
|
||||
union {
|
||||
u32_le raw;
|
||||
BitField<0, 1, u32_le> add_wait; // append a wait sync_point to the list
|
||||
BitField<1, 1, u32_le> add_increment; // append an increment to the list
|
||||
BitField<2, 1, u32_le> new_hw_format; // mostly ignored
|
||||
BitField<4, 1, u32_le> suppress_wfi; // suppress wait for interrupt
|
||||
BitField<8, 1, u32_le> increment; // increment the returned fence
|
||||
} flags;
|
||||
Fence fence_out{}; // returned new fence object for others to wait on
|
||||
|
||||
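// When add_increment is set this evaluates to 2, accounting for the pair of syncpoint
// increments queued by SubmitGPFIFOImpl for the submission.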
u32 AddIncrementValue() const {
|
||||
return flags.add_increment.Value() << 1;
|
||||
}
|
||||
};
|
||||
static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(Fence),
|
||||
"IoctlSubmitGpfifo is incorrect size");
|
||||
|
||||
struct IoctlGetWaitbase {
|
||||
u32 unknown{}; // seems to be ignored? Nintendo added this
|
||||
u32 value{};
|
||||
};
|
||||
static_assert(sizeof(IoctlGetWaitbase) == 8, "IoctlGetWaitbase is incorrect size");
|
||||
|
||||
s32_le nvmap_fd{};
|
||||
u64_le user_data{};
|
||||
IoctlZCullBind zcull_params{};
|
||||
u32_le channel_priority{};
|
||||
u32_le channel_timeslice{};
|
||||
|
||||
NvResult SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult SetClientData(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult GetClientData(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult ZCullBind(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult SetErrorNotifier(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult SetChannelPriority(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult AllocateObjectContext(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector<u8>& output,
|
||||
Tegra::CommandList&& entries);
|
||||
NvResult SubmitGPFIFOBase(const std::vector<u8>& input, std::vector<u8>& output,
|
||||
bool kickoff = false);
|
||||
NvResult SubmitGPFIFOBase(const std::vector<u8>& input, const std::vector<u8>& input_inline,
|
||||
std::vector<u8>& output);
|
||||
NvResult GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
|
||||
std::shared_ptr<nvmap> nvmap_dev;
|
||||
SyncpointManager& syncpoint_manager;
|
||||
Fence channel_fence;
|
||||
};
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
73
src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
Executable file
73
src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
Executable file
@@ -0,0 +1,73 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
nvhost_nvdec::nvhost_nvdec(Core::System& system, std::shared_ptr<nvmap> nvmap_dev)
|
||||
: nvhost_nvdec_common(system, std::move(nvmap_dev)) {}
|
||||
nvhost_nvdec::~nvhost_nvdec() = default;
|
||||
|
||||
NvResult nvhost_nvdec::Ioctl1(Ioctl command, const std::vector<u8>& input,
|
||||
std::vector<u8>& output) {
|
||||
switch (command.group) {
|
||||
case 0x0:
|
||||
switch (command.cmd) {
|
||||
case 0x1:
|
||||
return Submit(input, output);
|
||||
case 0x2:
|
||||
return GetSyncpoint(input, output);
|
||||
case 0x3:
|
||||
return GetWaitbase(input, output);
|
||||
case 0x7:
|
||||
return SetSubmitTimeout(input, output);
|
||||
case 0x9:
|
||||
return MapBuffer(input, output);
|
||||
case 0xa: {
|
||||
if (command.length == 0x1c) {
|
||||
LOG_INFO(Service_NVDRV, "NVDEC video stream ended");
|
||||
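// 0xDEADB33F appears to be an emulator-internal sentinel rather than a real hardware command;
// pushing it signals the command processor that the stream has ended before the queued caches
// are invalidated below.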
Tegra::ChCommandHeaderList cmdlist(1);
|
||||
cmdlist[0] = Tegra::ChCommandHeader{0xDEADB33F};
|
||||
system.GPU().PushCommandBuffer(cmdlist);
|
||||
system.GPU().MemoryManager().InvalidateQueuedCaches();
|
||||
}
|
||||
return UnmapBuffer(input, output);
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 'H':
|
||||
switch (command.cmd) {
|
||||
case 0x1:
|
||||
return SetNVMAPfd(input);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec::Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
24
src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
Executable file
24
src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
Executable file
@@ -0,0 +1,24 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
class nvhost_nvdec final : public nvhost_nvdec_common {
|
||||
public:
|
||||
explicit nvhost_nvdec(Core::System& system, std::shared_ptr<nvmap> nvmap_dev);
|
||||
~nvhost_nvdec() override;
|
||||
|
||||
NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
|
||||
NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) override;
|
||||
NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) override;
|
||||
};
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
235
src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
Executable file
235
src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
Executable file
@@ -0,0 +1,235 @@
|
||||
// Copyright 2020 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstring>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvmap.h"
|
||||
#include "core/memory.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
namespace {
|
||||
// SpliceVectors copies `count` elements of type T from the input vector into dst, starting at
// the given input offset, and returns the advanced offset.
|
||||
template <typename T>
|
||||
std::size_t SpliceVectors(const std::vector<u8>& input, std::vector<T>& dst, std::size_t count,
|
||||
std::size_t offset) {
|
||||
std::memcpy(dst.data(), input.data() + offset, count * sizeof(T));
|
||||
offset += count * sizeof(T);
|
||||
return offset;
|
||||
}
|
||||
|
||||
// WriteVectors copies the contents of src into dst at the given offset and returns the advanced offset.
|
||||
template <typename T>
|
||||
std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::size_t offset) {
|
||||
std::memcpy(dst.data() + offset, src.data(), src.size() * sizeof(T));
|
||||
offset += src.size() * sizeof(T);
|
||||
return offset;
|
||||
}
|
||||
} // Anonymous namespace
|
||||
|
||||
nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system, std::shared_ptr<nvmap> nvmap_dev)
|
||||
: nvdevice(system), nvmap_dev(std::move(nvmap_dev)) {}
|
||||
nvhost_nvdec_common::~nvhost_nvdec_common() = default;
|
||||
|
||||
NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
|
||||
IoctlSetNvmapFD params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlSetNvmapFD));
|
||||
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
|
||||
|
||||
nvmap_fd = params.nvmap_fd;
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec_common::Submit(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlSubmit params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlSubmit));
|
||||
LOG_DEBUG(Service_NVDRV, "called NVDEC Submit, cmd_buffer_count={}", params.cmd_buffer_count);
|
||||
|
||||
// Instantiate param buffers
|
||||
std::size_t offset = sizeof(IoctlSubmit);
|
||||
std::vector<CommandBuffer> command_buffers(params.cmd_buffer_count);
|
||||
std::vector<Reloc> relocs(params.relocation_count);
|
||||
std::vector<u32> reloc_shifts(params.relocation_count);
|
||||
std::vector<SyncptIncr> syncpt_increments(params.syncpoint_count);
|
||||
std::vector<SyncptIncr> wait_checks(params.syncpoint_count);
|
||||
std::vector<Fence> fences(params.fence_count);
|
||||
|
||||
// Splice input into their respective buffers
|
||||
offset = SpliceVectors(input, command_buffers, params.cmd_buffer_count, offset);
|
||||
offset = SpliceVectors(input, relocs, params.relocation_count, offset);
|
||||
offset = SpliceVectors(input, reloc_shifts, params.relocation_count, offset);
|
||||
offset = SpliceVectors(input, syncpt_increments, params.syncpoint_count, offset);
|
||||
offset = SpliceVectors(input, wait_checks, params.syncpoint_count, offset);
|
||||
offset = SpliceVectors(input, fences, params.fence_count, offset);
|
||||
|
||||
// TODO(ameerj): For async gpu, utilize fences for syncpoint 'max' increment
|
||||
|
||||
auto& gpu = system.GPU();
|
||||
|
||||
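// Each command buffer references an nvmap object that should already have been mapped via
// MapBuffer; its words are read back from the mapped GPU address (plus offset) and pushed to
// the command processor.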
for (const auto& cmd_buffer : command_buffers) {
|
||||
auto object = nvmap_dev->GetObject(cmd_buffer.memory_id);
|
||||
ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
|
||||
const auto map = FindBufferMap(object->dma_map_addr);
|
||||
if (!map) {
|
||||
LOG_ERROR(Service_NVDRV, "Tried to submit an invalid offset 0x{:X} dma 0x{:X}",
|
||||
object->addr, object->dma_map_addr);
|
||||
return NvResult::Success;
|
||||
}
|
||||
Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
|
||||
gpu.MemoryManager().ReadBlock(map->StartAddr() + cmd_buffer.offset, cmdlist.data(),
|
||||
cmdlist.size() * sizeof(u32));
|
||||
gpu.PushCommandBuffer(cmdlist);
|
||||
}
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(IoctlSubmit));
|
||||
// Some games expect command_buffers to be written back
|
||||
offset = sizeof(IoctlSubmit);
|
||||
offset = WriteVectors(output, command_buffers, offset);
|
||||
offset = WriteVectors(output, relocs, offset);
|
||||
offset = WriteVectors(output, reloc_shifts, offset);
|
||||
offset = WriteVectors(output, syncpt_increments, offset);
|
||||
offset = WriteVectors(output, wait_checks, offset);
|
||||
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlGetSyncpoint params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlGetSyncpoint));
|
||||
LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param);
|
||||
|
||||
// We found that implementing this causes deadlocks with async gpu, along with degraded
|
||||
// performance. TODO: RE the nvdec async implementation
|
||||
params.value = 0;
|
||||
std::memcpy(output.data(), ¶ms, sizeof(IoctlGetSyncpoint));
|
||||
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec_common::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlGetWaitbase params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlGetWaitbase));
|
||||
params.value = 0; // Seems to be hard coded at 0
|
||||
std::memcpy(output.data(), ¶ms, sizeof(IoctlGetWaitbase));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlMapBuffer params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlMapBuffer));
|
||||
std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
|
||||
|
||||
SpliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
|
||||
|
||||
auto& gpu = system.GPU();
|
||||
|
||||
for (auto& cmf_buff : cmd_buffer_handles) {
|
||||
auto object{nvmap_dev->GetObject(cmf_buff.map_handle)};
|
||||
if (!object) {
|
||||
LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmf_buff.map_handle);
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::InvalidState;
|
||||
}
|
||||
if (object->dma_map_addr == 0) {
|
||||
// NVDEC and VIC memory is in the 32-bit address space
|
||||
// MapAllocate32 will attempt to map a lower 32-bit value in the shared gpu memory space
|
||||
const GPUVAddr low_addr = gpu.MemoryManager().MapAllocate32(object->addr, object->size);
|
||||
object->dma_map_addr = static_cast<u32>(low_addr);
|
||||
// Ensure that the dma_map_addr is indeed in the lower 32-bit address space.
|
||||
ASSERT(object->dma_map_addr == low_addr);
|
||||
}
|
||||
if (!object->dma_map_addr) {
|
||||
LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size);
|
||||
} else {
|
||||
cmf_buff.map_address = object->dma_map_addr;
|
||||
AddBufferMap(object->dma_map_addr, object->size, object->addr,
|
||||
object->status == nvmap::Object::Status::Allocated);
|
||||
}
|
||||
}
|
||||
std::memcpy(output.data(), ¶ms, sizeof(IoctlMapBuffer));
|
||||
std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(),
|
||||
cmd_buffer_handles.size() * sizeof(MapBufferEntry));
|
||||
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlMapBuffer params{};
|
||||
std::memcpy(¶ms, input.data(), sizeof(IoctlMapBuffer));
|
||||
std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
|
||||
SpliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
|
||||
|
||||
auto& gpu = system.GPU();
|
||||
|
||||
for (auto& cmf_buff : cmd_buffer_handles) {
|
||||
const auto object{nvmap_dev->GetObject(cmf_buff.map_handle)};
|
||||
if (!object) {
|
||||
LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmf_buff.map_handle);
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvResult::InvalidState;
|
||||
}
|
||||
if (const auto size{RemoveBufferMap(object->dma_map_addr)}; size) {
|
||||
if (vic_device) {
|
||||
// UnmapVicFrame defers texture_cache invalidation of the frame address until
|
||||
// the stream is over
|
||||
gpu.MemoryManager().UnmapVicFrame(object->dma_map_addr, *size);
|
||||
} else {
|
||||
gpu.MemoryManager().Unmap(object->dma_map_addr, *size);
|
||||
}
|
||||
} else {
|
||||
// This occurs quite frequently, but it does not seem to impact functionality.
|
||||
LOG_DEBUG(Service_NVDRV, "invalid offset=0x{:X} dma=0x{:X}", object->addr,
|
||||
object->dma_map_addr);
|
||||
}
|
||||
object->dma_map_addr = 0;
|
||||
}
|
||||
std::memset(output.data(), 0, output.size());
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec_common::SetSubmitTimeout(const std::vector<u8>& input,
|
||||
std::vector<u8>& output) {
|
||||
std::memcpy(&submit_timeout, input.data(), input.size());
|
||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
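// Returns the mapping whose [StartAddr, EndAddr) range contains gpu_addr. upper_bound restricts
// the search to entries whose start address does not exceed the queried address; the lookup is
// expected to succeed, hence the assert below.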
std::optional<nvhost_nvdec_common::BufferMap> nvhost_nvdec_common::FindBufferMap(
|
||||
GPUVAddr gpu_addr) const {
|
||||
const auto it = std::find_if(
|
||||
buffer_mappings.begin(), buffer_mappings.upper_bound(gpu_addr), [&](const auto& entry) {
|
||||
return (gpu_addr >= entry.second.StartAddr() && gpu_addr < entry.second.EndAddr());
|
||||
});
|
||||
|
||||
ASSERT(it != buffer_mappings.end());
|
||||
return it->second;
|
||||
}
|
||||
|
||||
void nvhost_nvdec_common::AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr,
|
||||
bool is_allocated) {
|
||||
buffer_mappings.insert_or_assign(gpu_addr, BufferMap{gpu_addr, size, cpu_addr, is_allocated});
|
||||
}
|
||||
|
||||
std::optional<std::size_t> nvhost_nvdec_common::RemoveBufferMap(GPUVAddr gpu_addr) {
|
||||
const auto iter{buffer_mappings.find(gpu_addr)};
|
||||
if (iter == buffer_mappings.end()) {
|
||||
return std::nullopt;
|
||||
}
|
||||
std::size_t size = 0;
|
||||
if (iter->second.IsAllocated()) {
|
||||
size = iter->second.Size();
|
||||
}
|
||||
buffer_mappings.erase(iter);
|
||||
return size;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
165
src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
Executable file
165
src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
Executable file
@@ -0,0 +1,165 @@
|
||||
// Copyright 2020 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <map>
|
||||
#include <vector>
|
||||
#include "common/common_types.h"
|
||||
#include "common/swap.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvdevice.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
class nvmap;
|
||||
|
||||
class nvhost_nvdec_common : public nvdevice {
|
||||
public:
|
||||
explicit nvhost_nvdec_common(Core::System& system, std::shared_ptr<nvmap> nvmap_dev);
|
||||
~nvhost_nvdec_common() override;
|
||||
|
||||
protected:
|
||||
class BufferMap final {
|
||||
public:
|
||||
constexpr BufferMap() = default;
|
||||
|
||||
constexpr BufferMap(GPUVAddr start_addr, std::size_t size)
|
||||
: start_addr{start_addr}, end_addr{start_addr + size} {}
|
||||
|
||||
constexpr BufferMap(GPUVAddr start_addr, std::size_t size, VAddr cpu_addr,
|
||||
bool is_allocated)
|
||||
: start_addr{start_addr}, end_addr{start_addr + size}, cpu_addr{cpu_addr},
|
||||
is_allocated{is_allocated} {}
|
||||
|
||||
constexpr VAddr StartAddr() const {
|
||||
return start_addr;
|
||||
}
|
||||
|
||||
constexpr VAddr EndAddr() const {
|
||||
return end_addr;
|
||||
}
|
||||
|
||||
constexpr std::size_t Size() const {
|
||||
return end_addr - start_addr;
|
||||
}
|
||||
|
||||
constexpr VAddr CpuAddr() const {
|
||||
return cpu_addr;
|
||||
}
|
||||
|
||||
constexpr bool IsAllocated() const {
|
||||
return is_allocated;
|
||||
}
|
||||
|
||||
private:
|
||||
GPUVAddr start_addr{};
|
||||
GPUVAddr end_addr{};
|
||||
VAddr cpu_addr{};
|
||||
bool is_allocated{};
|
||||
};
|
||||
|
||||
struct IoctlSetNvmapFD {
|
||||
s32_le nvmap_fd{};
|
||||
};
|
||||
static_assert(sizeof(IoctlSetNvmapFD) == 4, "IoctlSetNvmapFD is incorrect size");
|
||||
|
||||
struct IoctlSubmitCommandBuffer {
|
||||
u32_le id{};
|
||||
u32_le offset{};
|
||||
u32_le count{};
|
||||
};
|
||||
static_assert(sizeof(IoctlSubmitCommandBuffer) == 0xC,
|
||||
"IoctlSubmitCommandBuffer is incorrect size");
|
||||
struct IoctlSubmit {
|
||||
u32_le cmd_buffer_count{};
|
||||
u32_le relocation_count{};
|
||||
u32_le syncpoint_count{};
|
||||
u32_le fence_count{};
|
||||
};
|
||||
static_assert(sizeof(IoctlSubmit) == 0x10, "IoctlSubmit has incorrect size");
|
||||
|
||||
struct CommandBuffer {
|
||||
s32 memory_id{};
|
||||
u32 offset{};
|
||||
s32 word_count{};
|
||||
};
|
||||
static_assert(sizeof(CommandBuffer) == 0xC, "CommandBuffer has incorrect size");
|
||||
|
||||
struct Reloc {
|
||||
s32 cmdbuffer_memory{};
|
||||
s32 cmdbuffer_offset{};
|
||||
s32 target{};
|
||||
s32 target_offset{};
|
||||
};
|
||||
static_assert(sizeof(Reloc) == 0x10, "Reloc has incorrect size");
|
||||
|
||||
struct SyncptIncr {
|
||||
u32 id{};
|
||||
u32 increments{};
|
||||
};
|
||||
static_assert(sizeof(SyncptIncr) == 0x8, "SyncptIncr has incorrect size");
|
||||
|
||||
struct Fence {
|
||||
u32 id{};
|
||||
u32 value{};
|
||||
};
|
||||
static_assert(sizeof(Fence) == 0x8, "Fence has incorrect size");
|
||||
|
||||
struct IoctlGetSyncpoint {
|
||||
// Input
|
||||
u32_le param{};
|
||||
// Output
|
||||
u32_le value{};
|
||||
};
|
||||
static_assert(sizeof(IoctlGetSyncpoint) == 8, "IoctlGetSyncpoint has incorrect size");
|
||||
|
||||
struct IoctlGetWaitbase {
|
||||
u32_le unknown{}; // seems to be ignored? Nintendo added this
|
||||
u32_le value{};
|
||||
};
|
||||
static_assert(sizeof(IoctlGetWaitbase) == 0x8, "IoctlGetWaitbase is incorrect size");
|
||||
|
||||
struct IoctlMapBuffer {
|
||||
u32_le num_entries{};
|
||||
u32_le data_address{}; // Ignored by the driver.
|
||||
u32_le attach_host_ch_das{};
|
||||
};
|
||||
static_assert(sizeof(IoctlMapBuffer) == 0x0C, "IoctlMapBuffer is incorrect size");
|
||||
|
||||
struct IocGetIdParams {
|
||||
// Input
|
||||
u32_le param{};
|
||||
// Output
|
||||
u32_le value{};
|
||||
};
|
||||
static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");
|
||||
|
||||
// Used for mapping and unmapping command buffers
|
||||
struct MapBufferEntry {
|
||||
u32_le map_handle{};
|
||||
u32_le map_address{};
|
||||
};
|
||||
static_assert(sizeof(MapBufferEntry) == 0x8, "MapBufferEntry is incorrect size");
|
||||
|
||||
/// Ioctl command implementations
|
||||
NvResult SetNVMAPfd(const std::vector<u8>& input);
|
||||
NvResult Submit(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult GetSyncpoint(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult MapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
|
||||
std::optional<BufferMap> FindBufferMap(GPUVAddr gpu_addr) const;
|
||||
void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated);
|
||||
std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr);
|
||||
|
||||
s32_le nvmap_fd{};
|
||||
u32_le submit_timeout{};
|
||||
bool vic_device{};
|
||||
std::shared_ptr<nvmap> nvmap_dev;
|
||||
|
||||
// This is expected to be ordered, therefore we must use a map, not unordered_map
|
||||
std::map<GPUVAddr, BufferMap> buffer_mappings;
|
||||
};
|
||||
} // namespace Service::Nvidia::Devices
|
56
src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
Executable file
56
src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
Executable file
@@ -0,0 +1,56 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <cstring>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
nvhost_nvjpg::nvhost_nvjpg(Core::System& system) : nvdevice(system) {}
|
||||
nvhost_nvjpg::~nvhost_nvjpg() = default;
|
||||
|
||||
NvResult nvhost_nvjpg::Ioctl1(Ioctl command, const std::vector<u8>& input,
|
||||
std::vector<u8>& output) {
|
||||
switch (command.group) {
|
||||
case 'H':
|
||||
switch (command.cmd) {
|
||||
case 0x1:
|
||||
return SetNVMAPfd(input, output);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvjpg::Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvjpg::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvjpg::SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlSetNvmapFD params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
|
||||
|
||||
nvmap_fd = params.nvmap_fd;
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
36
src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
Executable file
36
src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
Executable file
@@ -0,0 +1,36 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
#include "common/common_types.h"
|
||||
#include "common/swap.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvdevice.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
class nvhost_nvjpg final : public nvdevice {
|
||||
public:
|
||||
explicit nvhost_nvjpg(Core::System& system);
|
||||
~nvhost_nvjpg() override;
|
||||
|
||||
NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
|
||||
NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) override;
|
||||
NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) override;
|
||||
|
||||
private:
|
||||
struct IoctlSetNvmapFD {
|
||||
s32_le nvmap_fd{};
|
||||
};
|
||||
static_assert(sizeof(IoctlSetNvmapFD) == 4, "IoctlSetNvmapFD is incorrect size");
|
||||
|
||||
s32_le nvmap_fd{};
|
||||
|
||||
NvResult SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
};
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
65
src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
Executable file
65
src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
Executable file
@@ -0,0 +1,65 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvhost_vic.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
nvhost_vic::nvhost_vic(Core::System& system, std::shared_ptr<nvmap> nvmap_dev)
|
||||
: nvhost_nvdec_common(system, std::move(nvmap_dev)) {
|
||||
vic_device = true;
|
||||
}
|
||||
nvhost_vic::~nvhost_vic() = default;
|
||||
|
||||
NvResult nvhost_vic::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
switch (command.group) {
|
||||
case 0x0:
|
||||
switch (command.cmd) {
|
||||
case 0x1:
|
||||
return Submit(input, output);
|
||||
case 0x2:
|
||||
return GetSyncpoint(input, output);
|
||||
case 0x3:
|
||||
return GetWaitbase(input, output);
|
||||
case 0x9:
|
||||
return MapBuffer(input, output);
|
||||
case 0xa:
|
||||
return UnmapBuffer(input, output);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 'H':
|
||||
switch (command.cmd) {
|
||||
case 0x1:
|
||||
return SetNVMAPfd(input);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_vic::Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvhost_vic::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
23
src/core/hle/service/nvdrv/devices/nvhost_vic.h
Executable file
23
src/core/hle/service/nvdrv/devices/nvhost_vic.h
Executable file
@@ -0,0 +1,23 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
class nvmap;
|
||||
|
||||
class nvhost_vic final : public nvhost_nvdec_common {
|
||||
public:
|
||||
explicit nvhost_vic(Core::System& system, std::shared_ptr<nvmap> nvmap_dev);
|
||||
~nvhost_vic();
|
||||
|
||||
NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
|
||||
NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) override;
|
||||
NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) override;
|
||||
};
|
||||
} // namespace Service::Nvidia::Devices
|
278
src/core/hle/service/nvdrv/devices/nvmap.cpp
Executable file
278
src/core/hle/service/nvdrv/devices/nvmap.cpp
Executable file
@@ -0,0 +1,278 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstring>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvmap.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
|
||||
nvmap::nvmap(Core::System& system) : nvdevice(system) {
|
||||
// Handle 0 appears to be used when remapping, so we create a placeholder empty nvmap object to
|
||||
// represent this.
|
||||
CreateObject(0);
|
||||
}
|
||||
|
||||
nvmap::~nvmap() = default;
|
||||
|
||||
NvResult nvmap::Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
switch (command.group) {
|
||||
case 0x1:
|
||||
switch (command.cmd) {
|
||||
case 0x1:
|
||||
return IocCreate(input, output);
|
||||
case 0x3:
|
||||
return IocFromId(input, output);
|
||||
case 0x4:
|
||||
return IocAlloc(input, output);
|
||||
case 0x5:
|
||||
return IocFree(input, output);
|
||||
case 0x9:
|
||||
return IocParam(input, output);
|
||||
case 0xe:
|
||||
return IocGetId(input, output);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvmap::Ioctl2(Ioctl command, const std::vector<u8>& input,
|
||||
const std::vector<u8>& inline_input, std::vector<u8>& output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
NvResult nvmap::Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& inline_output) {
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
VAddr nvmap::GetObjectAddress(u32 handle) const {
|
||||
auto object = GetObject(handle);
|
||||
ASSERT(object);
|
||||
ASSERT(object->status == Object::Status::Allocated);
|
||||
return object->addr;
|
||||
}
|
||||
|
||||
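// Creates a fresh object in the Created state and returns the handle used to refer to it; the
// object only receives a backing address once IocAlloc is called on that handle.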
u32 nvmap::CreateObject(u32 size) {
|
||||
// Create a new nvmap object and obtain a handle to it.
|
||||
auto object = std::make_shared<Object>();
|
||||
object->id = next_id++;
|
||||
object->size = size;
|
||||
object->status = Object::Status::Created;
|
||||
object->refcount = 1;
|
||||
|
||||
const u32 handle = next_handle++;
|
||||
|
||||
handles.insert_or_assign(handle, std::move(object));
|
||||
|
||||
return handle;
|
||||
}
|
||||
|
||||
NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IocCreateParams params;
|
||||
std::memcpy(¶ms, input.data(), sizeof(params));
|
||||
LOG_DEBUG(Service_NVDRV, "size=0x{:08X}", params.size);
|
||||
|
||||
if (!params.size) {
|
||||
LOG_ERROR(Service_NVDRV, "Size is 0");
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
params.handle = CreateObject(params.size);
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IocAllocParams params;
|
||||
std::memcpy(¶ms, input.data(), sizeof(params));
|
||||
LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.addr);
|
||||
|
||||
if (!params.handle) {
|
||||
LOG_ERROR(Service_NVDRV, "Handle is 0");
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
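// A valid alignment must be a power of two, for which align & (align - 1) is zero; an alignment
// of zero also passes and is raised to the 0x1000 minimum below.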
if ((params.align - 1) & params.align) {
|
||||
LOG_ERROR(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
const u32 min_alignment = 0x1000;
|
||||
if (params.align < min_alignment) {
|
||||
params.align = min_alignment;
|
||||
}
|
||||
|
||||
auto object = GetObject(params.handle);
|
||||
if (!object) {
|
||||
LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
if (object->status == Object::Status::Allocated) {
|
||||
LOG_ERROR(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle);
|
||||
return NvResult::InsufficientMemory;
|
||||
}
|
||||
|
||||
object->flags = params.flags;
|
||||
object->align = params.align;
|
||||
object->kind = params.kind;
|
||||
object->addr = params.addr;
|
||||
object->status = Object::Status::Allocated;
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IocGetIdParams params;
|
||||
std::memcpy(¶ms, input.data(), sizeof(params));
|
||||
|
||||
LOG_WARNING(Service_NVDRV, "called");
|
||||
|
||||
if (!params.handle) {
|
||||
LOG_ERROR(Service_NVDRV, "Handle is zero");
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
auto object = GetObject(params.handle);
|
||||
if (!object) {
|
||||
LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
params.id = object->id;
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvmap::IocFromId(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IocFromIdParams params;
|
||||
std::memcpy(¶ms, input.data(), sizeof(params));
|
||||
|
||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
|
||||
|
||||
auto itr = std::find_if(handles.begin(), handles.end(),
|
||||
[&](const auto& entry) { return entry.second->id == params.id; });
|
||||
if (itr == handles.end()) {
|
||||
LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
auto& object = itr->second;
|
||||
if (object->status != Object::Status::Allocated) {
|
||||
LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
itr->second->refcount++;
|
||||
|
||||
// Return the existing handle instead of creating a new one.
|
||||
params.handle = itr->first;
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
enum class ParamTypes { Size = 1, Alignment = 2, Base = 3, Heap = 4, Kind = 5, Compr = 6 };
|
||||
|
||||
IocParamParams params;
|
||||
std::memcpy(¶ms, input.data(), sizeof(params));
|
||||
|
||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called type={}", params.param);
|
||||
|
||||
auto object = GetObject(params.handle);
|
||||
if (!object) {
|
||||
LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
if (object->status != Object::Status::Allocated) {
|
||||
LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
switch (static_cast<ParamTypes>(params.param)) {
|
||||
case ParamTypes::Size:
|
||||
params.result = object->size;
|
||||
break;
|
||||
case ParamTypes::Alignment:
|
||||
params.result = object->align;
|
||||
break;
|
||||
case ParamTypes::Heap:
|
||||
// TODO(Subv): Seems to be a hardcoded value?
|
||||
params.result = 0x40000000;
|
||||
break;
|
||||
case ParamTypes::Kind:
|
||||
params.result = object->kind;
|
||||
break;
|
||||
default:
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
// TODO(Subv): These flags are unconfirmed.
|
||||
enum FreeFlags {
|
||||
Freed = 0,
|
||||
NotFreedYet = 1,
|
||||
};
|
||||
|
||||
IocFreeParams params;
|
||||
std::memcpy(¶ms, input.data(), sizeof(params));
|
||||
|
||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
|
||||
|
||||
auto itr = handles.find(params.handle);
|
||||
if (itr == handles.end()) {
|
||||
LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
if (!itr->second->refcount) {
|
||||
LOG_ERROR(
|
||||
Service_NVDRV,
|
||||
"There is no references to this object. The object is already freed. handle={:08X}",
|
||||
params.handle);
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
itr->second->refcount--;
|
||||
|
||||
params.size = itr->second->size;
|
||||
|
||||
if (itr->second->refcount == 0) {
|
||||
params.flags = Freed;
|
||||
// The address of the nvmap is written to the output if we're finally freeing it, otherwise
|
||||
// 0 is written.
|
||||
params.address = itr->second->addr;
|
||||
} else {
|
||||
params.flags = NotFreedYet;
|
||||
params.address = 0;
|
||||
}
|
||||
|
||||
handles.erase(params.handle);
|
||||
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
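
The handlers in this file all follow the same marshalling pattern: a fixed-size params struct is copied out of the ioctl's raw input buffer with std::memcpy, filled in, and copied back into the output buffer. The snippet below is a self-contained sketch of that pattern only; DemoParams and HandleDemo are hypothetical names used for illustration and are not part of this commit.

// Standalone illustration of the memcpy-based ioctl marshalling used above.
// DemoParams/HandleDemo are hypothetical; they do not exist in yuzu.
#include <cstdint>
#include <cstring>
#include <vector>

struct DemoParams {
    uint32_t handle; // input
    uint32_t id;     // output
};

bool HandleDemo(const std::vector<uint8_t>& input, std::vector<uint8_t>& output) {
    DemoParams params{};
    std::memcpy(&params, input.data(), sizeof(params));  // deserialize the request
    params.id = params.handle + 1;                       // "handle" the request
    std::memcpy(output.data(), &params, sizeof(params)); // serialize the response
    return true;
}

int main() {
    std::vector<uint8_t> input(sizeof(DemoParams));
    std::vector<uint8_t> output(sizeof(DemoParams));

    DemoParams request{};
    request.handle = 41;
    std::memcpy(input.data(), &request, sizeof(request));

    HandleDemo(input, output);

    DemoParams response{};
    std::memcpy(&response, output.data(), sizeof(response));
    return response.id == 42 ? 0 : 1; // exits 0 when the round trip worked
}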
127
src/core/hle/service/nvdrv/devices/nvmap.h
Executable file
@@ -0,0 +1,127 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <unordered_map>
#include <vector>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"

namespace Service::Nvidia::Devices {

class nvmap final : public nvdevice {
public:
    explicit nvmap(Core::System& system);
    ~nvmap() override;

    NvResult Ioctl1(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
    NvResult Ioctl2(Ioctl command, const std::vector<u8>& input,
                    const std::vector<u8>& inline_input, std::vector<u8>& output) override;
    NvResult Ioctl3(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output,
                    std::vector<u8>& inline_output) override;

    /// Returns the allocated address of an nvmap object given its handle.
    VAddr GetObjectAddress(u32 handle) const;

    /// Represents an nvmap object.
    struct Object {
        enum class Status { Created, Allocated };
        u32 id;
        u32 size;
        u32 flags;
        u32 align;
        u8 kind;
        VAddr addr;
        Status status;
        u32 refcount;
        u32 dma_map_addr;
    };

    std::shared_ptr<Object> GetObject(u32 handle) const {
        auto itr = handles.find(handle);
        if (itr != handles.end()) {
            return itr->second;
        }
        return {};
    }

private:
    /// Id to use for the next handle that is created.
    u32 next_handle = 0;

    /// Id to use for the next object that is created.
    u32 next_id = 0;

    /// Mapping of currently allocated handles to the objects they represent.
    std::unordered_map<u32, std::shared_ptr<Object>> handles;

    struct IocCreateParams {
        // Input
        u32_le size{};
        // Output
        u32_le handle{};
    };
    static_assert(sizeof(IocCreateParams) == 8, "IocCreateParams has wrong size");

    struct IocFromIdParams {
        // Input
        u32_le id{};
        // Output
        u32_le handle{};
    };
    static_assert(sizeof(IocFromIdParams) == 8, "IocFromIdParams has wrong size");

    struct IocAllocParams {
        // Input
        u32_le handle{};
        u32_le heap_mask{};
        u32_le flags{};
        u32_le align{};
        u8 kind{};
        INSERT_PADDING_BYTES(7);
        u64_le addr{};
    };
    static_assert(sizeof(IocAllocParams) == 32, "IocAllocParams has wrong size");

    struct IocFreeParams {
        u32_le handle{};
        INSERT_PADDING_BYTES(4);
        u64_le address{};
        u32_le size{};
        u32_le flags{};
    };
    static_assert(sizeof(IocFreeParams) == 24, "IocFreeParams has wrong size");

    struct IocParamParams {
        // Input
        u32_le handle{};
        u32_le param{};
        // Output
        u32_le result{};
    };
    static_assert(sizeof(IocParamParams) == 12, "IocParamParams has wrong size");

    struct IocGetIdParams {
        // Output
        u32_le id{};
        // Input
        u32_le handle{};
    };
    static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");

    u32 CreateObject(u32 size);

    NvResult IocCreate(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocAlloc(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocGetId(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocFromId(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocParam(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult IocFree(const std::vector<u8>& input, std::vector<u8>& output);
};

} // namespace Service::Nvidia::Devices
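
These param structs are copied byte-for-byte to and from the guest's ioctl buffers, which is why each one pins its exact layout with explicit padding and a static_assert. As a self-contained sketch of how IocFreeParams reaches 24 bytes, the mirror struct below models u32_le/u64_le as plain uint32_t/uint64_t, an assumption that only matches on a little-endian host; FreeParamsMirror itself is hypothetical and not part of this commit.

// Standalone illustration of why sizeof(IocFreeParams) == 24.
// FreeParamsMirror is a hypothetical stand-in, not a yuzu type.
#include <cstddef>
#include <cstdint>

struct FreeParamsMirror {
    uint32_t handle;
    uint32_t pad;     // explicit 4-byte pad, as INSERT_PADDING_BYTES(4) above
    uint64_t address; // 8-byte aligned, hence the pad before it
    uint32_t size;
    uint32_t flags;
};

static_assert(offsetof(FreeParamsMirror, address) == 8, "address starts at byte 8");
static_assert(sizeof(FreeParamsMirror) == 24, "matches the static_assert in nvmap.h");

int main() {
    return 0;
}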