another try
@@ -1,310 +1,310 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <algorithm>
#include <fstream>
#include <vector>
#include "common/assert.h"
#include "common/settings.h"
#include "video_core/host1x/codecs/codec.h"
#include "video_core/host1x/codecs/h264.h"
#include "video_core/host1x/codecs/vp8.h"
#include "video_core/host1x/codecs/vp9.h"
#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"

extern "C" {
#include <libavutil/opt.h>
#ifdef LIBVA_FOUND
// for querying VAAPI driver information
#include <libavutil/hwcontext_vaapi.h>
#endif
}

namespace Tegra {
namespace {
constexpr AVPixelFormat PREFERRED_GPU_FMT = AV_PIX_FMT_NV12;
constexpr AVPixelFormat PREFERRED_CPU_FMT = AV_PIX_FMT_YUV420P;
constexpr std::array PREFERRED_GPU_DECODERS = {
    AV_HWDEVICE_TYPE_CUDA,
#ifdef _WIN32
    AV_HWDEVICE_TYPE_D3D11VA,
    AV_HWDEVICE_TYPE_DXVA2,
#elif defined(__unix__)
    AV_HWDEVICE_TYPE_VAAPI,
    AV_HWDEVICE_TYPE_VDPAU,
#endif
    // last resort for Linux Flatpak (w/ NVIDIA)
    AV_HWDEVICE_TYPE_VULKAN,
};

void AVPacketDeleter(AVPacket* ptr) {
    av_packet_free(&ptr);
}

using AVPacketPtr = std::unique_ptr<AVPacket, decltype(&AVPacketDeleter)>;

AVPixelFormat GetGpuFormat(AVCodecContext* av_codec_ctx, const AVPixelFormat* pix_fmts) {
    for (const AVPixelFormat* p = pix_fmts; *p != AV_PIX_FMT_NONE; ++p) {
        if (*p == av_codec_ctx->pix_fmt) {
            return av_codec_ctx->pix_fmt;
        }
    }
    LOG_INFO(Service_NVDRV, "Could not find compatible GPU AV format, falling back to CPU");
    av_buffer_unref(&av_codec_ctx->hw_device_ctx);
    av_codec_ctx->pix_fmt = PREFERRED_CPU_FMT;
    return PREFERRED_CPU_FMT;
}

// List all the hwcontexts currently available in FFmpeg
std::vector<AVHWDeviceType> ListSupportedContexts() {
    std::vector<AVHWDeviceType> contexts{};
    AVHWDeviceType current_device_type = AV_HWDEVICE_TYPE_NONE;
    do {
        current_device_type = av_hwdevice_iterate_types(current_device_type);
        contexts.push_back(current_device_type);
    } while (current_device_type != AV_HWDEVICE_TYPE_NONE);
    return contexts;
}

} // namespace

void AVFrameDeleter(AVFrame* ptr) {
    av_frame_free(&ptr);
}

Codec::Codec(Host1x::Host1x& host1x_, const Host1x::NvdecCommon::NvdecRegisters& regs)
    : host1x(host1x_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(host1x)),
      vp8_decoder(std::make_unique<Decoder::VP8>(host1x)),
      vp9_decoder(std::make_unique<Decoder::VP9>(host1x)) {}

Codec::~Codec() {
    if (!initialized) {
        return;
    }
    // Free libav memory
    avcodec_free_context(&av_codec_ctx);
    av_buffer_unref(&av_gpu_decoder);
}

bool Codec::CreateGpuAvDevice() {
    static constexpr auto HW_CONFIG_METHOD = AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX;
    static const auto supported_contexts = ListSupportedContexts();
    for (const auto& type : PREFERRED_GPU_DECODERS) {
        if (std::none_of(supported_contexts.begin(), supported_contexts.end(),
                         [&type](const auto& context) { return context == type; })) {
            LOG_DEBUG(Service_NVDRV, "{} explicitly unsupported", av_hwdevice_get_type_name(type));
            continue;
        }
        // Avoid memory leak from not cleaning up after av_hwdevice_ctx_create
        av_buffer_unref(&av_gpu_decoder);
        const int hwdevice_res = av_hwdevice_ctx_create(&av_gpu_decoder, type, nullptr, nullptr, 0);
        if (hwdevice_res < 0) {
            LOG_DEBUG(Service_NVDRV, "{} av_hwdevice_ctx_create failed {}",
                      av_hwdevice_get_type_name(type), hwdevice_res);
            continue;
        }
#ifdef LIBVA_FOUND
        if (type == AV_HWDEVICE_TYPE_VAAPI) {
            // we need to determine if this is an impersonated VAAPI driver
            AVHWDeviceContext* hwctx =
                static_cast<AVHWDeviceContext*>(static_cast<void*>(av_gpu_decoder->data));
            AVVAAPIDeviceContext* vactx = static_cast<AVVAAPIDeviceContext*>(hwctx->hwctx);
            const char* vendor_name = vaQueryVendorString(vactx->display);
            if (strstr(vendor_name, "VDPAU backend")) {
                // VDPAU-impersonated VAAPI implementations are very buggy, so skip them
                LOG_DEBUG(Service_NVDRV, "Skipping VDPAU impersonated VAAPI driver");
                continue;
            } else {
                // According to some user testing, certain VAAPI drivers (Intel?) can be buggy,
                // so log the driver name to help developers and supporters
                LOG_DEBUG(Service_NVDRV, "Using VAAPI driver: {}", vendor_name);
            }
        }
#endif
        for (int i = 0;; i++) {
            const AVCodecHWConfig* config = avcodec_get_hw_config(av_codec, i);
            if (!config) {
                LOG_DEBUG(Service_NVDRV, "{} decoder does not support device type {}.",
                          av_codec->name, av_hwdevice_get_type_name(type));
                break;
            }
            if ((config->methods & HW_CONFIG_METHOD) != 0 && config->device_type == type) {
#if defined(__unix__)
                // Some Linux decoding backends are reported to crash with this config method
                // TODO(ameerj): Properly support this method
                if ((config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX) != 0) {
                    // skip zero-copy decoders, we don't currently support them
                    LOG_DEBUG(Service_NVDRV, "Skipping decoder {} with unsupported capability {}.",
                              av_hwdevice_get_type_name(type), config->methods);
                    continue;
                }
#endif
                LOG_INFO(Service_NVDRV, "Using {} GPU decoder", av_hwdevice_get_type_name(type));
                av_codec_ctx->pix_fmt = config->pix_fmt;
                return true;
            }
        }
    }
    return false;
}

void Codec::InitializeAvCodecContext() {
    av_codec_ctx = avcodec_alloc_context3(av_codec);
    av_opt_set(av_codec_ctx->priv_data, "tune", "zerolatency", 0);
}

void Codec::InitializeGpuDecoder() {
    if (!CreateGpuAvDevice()) {
        av_buffer_unref(&av_gpu_decoder);
        return;
    }
    auto* hw_device_ctx = av_buffer_ref(av_gpu_decoder);
    ASSERT_MSG(hw_device_ctx, "av_buffer_ref failed");
    av_codec_ctx->hw_device_ctx = hw_device_ctx;
    av_codec_ctx->get_format = GetGpuFormat;
}

void Codec::Initialize() {
    const AVCodecID codec = [&] {
        switch (current_codec) {
        case Host1x::NvdecCommon::VideoCodec::H264:
            return AV_CODEC_ID_H264;
        case Host1x::NvdecCommon::VideoCodec::VP8:
            return AV_CODEC_ID_VP8;
        case Host1x::NvdecCommon::VideoCodec::VP9:
            return AV_CODEC_ID_VP9;
        default:
            UNIMPLEMENTED_MSG("Unknown codec {}", current_codec);
            return AV_CODEC_ID_NONE;
        }
    }();
    av_codec = avcodec_find_decoder(codec);

    InitializeAvCodecContext();
    if (Settings::values.nvdec_emulation.GetValue() == Settings::NvdecEmulation::GPU) {
        InitializeGpuDecoder();
    }
    if (const int res = avcodec_open2(av_codec_ctx, av_codec, nullptr); res < 0) {
        LOG_ERROR(Service_NVDRV, "avcodec_open2() Failed with result {}", res);
        avcodec_free_context(&av_codec_ctx);
        av_buffer_unref(&av_gpu_decoder);
        return;
    }
    if (!av_codec_ctx->hw_device_ctx) {
        LOG_INFO(Service_NVDRV, "Using FFmpeg software decoding");
    }
    initialized = true;
}

void Codec::SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec) {
    if (current_codec != codec) {
        current_codec = codec;
        LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", GetCurrentCodecName());
    }
}

void Codec::Decode() {
    const bool is_first_frame = !initialized;
    if (is_first_frame) {
        Initialize();
    }
    if (!initialized) {
        return;
    }
    bool vp9_hidden_frame = false;
    const auto& frame_data = [&]() {
        switch (current_codec) {
        case Tegra::Host1x::NvdecCommon::VideoCodec::H264:
            return h264_decoder->ComposeFrame(state, is_first_frame);
        case Tegra::Host1x::NvdecCommon::VideoCodec::VP8:
            return vp8_decoder->ComposeFrame(state);
        case Tegra::Host1x::NvdecCommon::VideoCodec::VP9:
            vp9_decoder->ComposeFrame(state);
            vp9_hidden_frame = vp9_decoder->WasFrameHidden();
            return vp9_decoder->GetFrameBytes();
        default:
            ASSERT(false);
            return std::vector<u8>{};
        }
    }();
    AVPacketPtr packet{av_packet_alloc(), AVPacketDeleter};
    if (!packet) {
        LOG_ERROR(Service_NVDRV, "av_packet_alloc failed");
        return;
    }
    packet->data = const_cast<u8*>(frame_data.data());
    packet->size = static_cast<s32>(frame_data.size());
    if (const int res = avcodec_send_packet(av_codec_ctx, packet.get()); res != 0) {
        LOG_DEBUG(Service_NVDRV, "avcodec_send_packet error {}", res);
        return;
    }
    // Only receive/store visible frames
    if (vp9_hidden_frame) {
        return;
    }
    AVFramePtr initial_frame{av_frame_alloc(), AVFrameDeleter};
    AVFramePtr final_frame{nullptr, AVFrameDeleter};
    ASSERT_MSG(initial_frame, "av_frame_alloc initial_frame failed");
    if (const int ret = avcodec_receive_frame(av_codec_ctx, initial_frame.get()); ret) {
        LOG_DEBUG(Service_NVDRV, "avcodec_receive_frame error {}", ret);
        return;
    }
    if (initial_frame->width == 0 || initial_frame->height == 0) {
        LOG_WARNING(Service_NVDRV, "Zero width or height in frame");
        return;
    }
    if (av_codec_ctx->hw_device_ctx) {
        final_frame = AVFramePtr{av_frame_alloc(), AVFrameDeleter};
        ASSERT_MSG(final_frame, "av_frame_alloc final_frame failed");
        // Can't use AV_PIX_FMT_YUV420P and share code with software decoding in vic.cpp
        // because Intel drivers crash unless using AV_PIX_FMT_NV12
        final_frame->format = PREFERRED_GPU_FMT;
        const int ret = av_hwframe_transfer_data(final_frame.get(), initial_frame.get(), 0);
        ASSERT_MSG(!ret, "av_hwframe_transfer_data error {}", ret);
    } else {
        final_frame = std::move(initial_frame);
    }
    if (final_frame->format != PREFERRED_CPU_FMT && final_frame->format != PREFERRED_GPU_FMT) {
        UNIMPLEMENTED_MSG("Unexpected video format: {}", final_frame->format);
        return;
    }
    av_frames.push(std::move(final_frame));
    if (av_frames.size() > 10) {
        LOG_TRACE(Service_NVDRV, "av_frames.push overflow dropped frame");
        av_frames.pop();
    }
}

AVFramePtr Codec::GetCurrentFrame() {
    // Sometimes VIC will request more frames than have been decoded.
    // In this case, return a nullptr and don't overwrite previous frame data
    if (av_frames.empty()) {
        return AVFramePtr{nullptr, AVFrameDeleter};
    }
    AVFramePtr frame = std::move(av_frames.front());
    av_frames.pop();
    return frame;
}

Host1x::NvdecCommon::VideoCodec Codec::GetCurrentCodec() const {
    return current_codec;
}

std::string_view Codec::GetCurrentCodecName() const {
    switch (current_codec) {
    case Host1x::NvdecCommon::VideoCodec::None:
        return "None";
    case Host1x::NvdecCommon::VideoCodec::H264:
        return "H264";
    case Host1x::NvdecCommon::VideoCodec::VP8:
        return "VP8";
    case Host1x::NvdecCommon::VideoCodec::H265:
        return "H265";
    case Host1x::NvdecCommon::VideoCodec::VP9:
        return "VP9";
    default:
        return "Unknown";
    }
}
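
AVPacketPtr and AVFramePtr above pair each FFmpeg allocation with its matching av_*_free call through std::unique_ptr, so packets and frames are released on every early return in Decode(). A minimal standalone sketch of that deleter pattern, not part of the commit; the example namespace and FramePtr name are placeholders, and only the FFmpeg development headers are assumed:

// Sketch: wrap an FFmpeg C object in a unique_ptr with a custom deleter,
// the same idiom used by AVPacketPtr and AVFramePtr in codec.cpp above.
#include <memory>

extern "C" {
#include <libavutil/frame.h>
}

namespace example {

void FrameDeleter(AVFrame* ptr) {
    av_frame_free(&ptr); // av_frame_free releases the frame and nulls the pointer
}
using FramePtr = std::unique_ptr<AVFrame, decltype(&FrameDeleter)>;

} // namespace example

int main() {
    // The frame is freed automatically when `frame` leaves scope, even on early return.
    example::FramePtr frame{av_frame_alloc(), example::FrameDeleter};
    return frame ? 0 : 1;
}
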
@@ -1,84 +1,84 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <memory>
#include <string_view>
#include <queue>
#include "common/common_types.h"
#include "video_core/host1x/nvdec_common.h"

extern "C" {
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
#include <libavcodec/avcodec.h>
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
}

namespace Tegra {

void AVFrameDeleter(AVFrame* ptr);
using AVFramePtr = std::unique_ptr<AVFrame, decltype(&AVFrameDeleter)>;

namespace Decoder {
class H264;
class VP8;
class VP9;
} // namespace Decoder

namespace Host1x {
class Host1x;
} // namespace Host1x

class Codec {
public:
    explicit Codec(Host1x::Host1x& host1x, const Host1x::NvdecCommon::NvdecRegisters& regs);
    ~Codec();

    /// Initialize the codec context and decoder; the codec stays uninitialized on failure
    void Initialize();

    /// Sets the NVDEC video stream codec
    void SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec);

    /// Calls the decoders to construct headers, then decodes the AVFrame with FFmpeg
    void Decode();

    /// Returns the next decoded frame
    [[nodiscard]] AVFramePtr GetCurrentFrame();

    /// Returns the value of current_codec
    [[nodiscard]] Host1x::NvdecCommon::VideoCodec GetCurrentCodec() const;

    /// Returns the name of the current codec
    [[nodiscard]] std::string_view GetCurrentCodecName() const;

private:
    void InitializeAvCodecContext();

    void InitializeGpuDecoder();

    bool CreateGpuAvDevice();

    bool initialized{};
    Host1x::NvdecCommon::VideoCodec current_codec{Host1x::NvdecCommon::VideoCodec::None};

    const AVCodec* av_codec{nullptr};
    AVCodecContext* av_codec_ctx{nullptr};
    AVBufferRef* av_gpu_decoder{nullptr};

    Host1x::Host1x& host1x;
    const Host1x::NvdecCommon::NvdecRegisters& state;
    std::unique_ptr<Decoder::H264> h264_decoder;
    std::unique_ptr<Decoder::VP8> vp8_decoder;
    std::unique_ptr<Decoder::VP9> vp9_decoder;

    std::queue<AVFramePtr> av_frames{};
};

} // namespace Tegra
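
Codec holds std::unique_ptr members to the forward-declared Decoder::H264, Decoder::VP8, and Decoder::VP9 types, which is why ~Codec() is only declared here and defined in codec.cpp, where those types are complete. A minimal sketch of the same idiom, not part of the commit; Widget and Impl are hypothetical names:

// Sketch: unique_ptr to a forward-declared type requires an out-of-line destructor.
#include <memory>

class Impl; // forward declaration only

class Widget {
public:
    Widget();
    ~Widget(); // declared here, defined below where Impl is complete
private:
    std::unique_ptr<Impl> impl;
};

// In the "source file": Impl is now complete, so unique_ptr<Impl>'s deleter can compile.
class Impl {
public:
    int value{42};
};

Widget::Widget() : impl{std::make_unique<Impl>()} {}
Widget::~Widget() = default;

int main() {
    Widget widget; // destroys Impl correctly on scope exit
    return 0;
}
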
@@ -1,278 +1,278 @@
// SPDX-FileCopyrightText: Ryujinx Team and Contributors
// SPDX-License-Identifier: MIT

#include <array>
#include <bit>

#include "common/settings.h"
#include "video_core/host1x/codecs/h264.h"
#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"

namespace Tegra::Decoder {
namespace {
// ZigZag LUTs from libavcodec.
constexpr std::array<u8, 64> zig_zag_direct{
    0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48,
    41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23,
    30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63,
};

constexpr std::array<u8, 16> zig_zag_scan{
    0 + 0 * 4, 1 + 0 * 4, 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 2 + 0 * 4, 3 + 0 * 4, 2 + 1 * 4,
    1 + 2 * 4, 0 + 3 * 4, 1 + 3 * 4, 2 + 2 * 4, 3 + 1 * 4, 3 + 2 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
} // Anonymous namespace

H264::H264(Host1x::Host1x& host1x_) : host1x{host1x_} {}

H264::~H264() = default;

const std::vector<u8>& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state,
                                          bool is_first_frame) {
    H264DecoderContext context;
    host1x.MemoryManager().ReadBlock(state.picture_info_offset, &context,
                                     sizeof(H264DecoderContext));

    const s64 frame_number = context.h264_parameter_set.frame_number.Value();
    if (!is_first_frame && frame_number != 0) {
        frame.resize(context.stream_len);
        host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
        return frame;
    }

    // Encode header
    H264BitWriter writer{};
    writer.WriteU(1, 24);
    writer.WriteU(0, 1);
    writer.WriteU(3, 2);
    writer.WriteU(7, 5);
    writer.WriteU(100, 8);
    writer.WriteU(0, 8);
    writer.WriteU(31, 8);
    writer.WriteUe(0);
    const u32 chroma_format_idc =
        static_cast<u32>(context.h264_parameter_set.chroma_format_idc.Value());
    writer.WriteUe(chroma_format_idc);
    if (chroma_format_idc == 3) {
        writer.WriteBit(false);
    }

    writer.WriteUe(0);
    writer.WriteUe(0);
    writer.WriteBit(false); // QpprimeYZeroTransformBypassFlag
    writer.WriteBit(false); // Scaling matrix present flag

    writer.WriteUe(static_cast<u32>(context.h264_parameter_set.log2_max_frame_num_minus4.Value()));

    const auto order_cnt_type =
        static_cast<u32>(context.h264_parameter_set.pic_order_cnt_type.Value());
    writer.WriteUe(order_cnt_type);
    if (order_cnt_type == 0) {
        writer.WriteUe(context.h264_parameter_set.log2_max_pic_order_cnt_lsb_minus4);
    } else if (order_cnt_type == 1) {
        writer.WriteBit(context.h264_parameter_set.delta_pic_order_always_zero_flag != 0);

        writer.WriteSe(0);
        writer.WriteSe(0);
        writer.WriteUe(0);
    }

    const s32 pic_height = context.h264_parameter_set.frame_height_in_map_units /
                           (context.h264_parameter_set.frame_mbs_only_flag ? 1 : 2);

    // TODO (ameerj): Where do we get this number? It seems to be particular to each stream
    const auto nvdec_decoding = Settings::values.nvdec_emulation.GetValue();
    const bool uses_gpu_decoding = nvdec_decoding == Settings::NvdecEmulation::GPU;
    const u32 max_num_ref_frames = uses_gpu_decoding ? 6u : 16u;
    writer.WriteUe(max_num_ref_frames);
    writer.WriteBit(false);
    writer.WriteUe(context.h264_parameter_set.pic_width_in_mbs - 1);
    writer.WriteUe(pic_height - 1);
    writer.WriteBit(context.h264_parameter_set.frame_mbs_only_flag != 0);

    if (!context.h264_parameter_set.frame_mbs_only_flag) {
        writer.WriteBit(context.h264_parameter_set.flags.mbaff_frame.Value() != 0);
    }

    writer.WriteBit(context.h264_parameter_set.flags.direct_8x8_inference.Value() != 0);
    writer.WriteBit(false); // Frame cropping flag
    writer.WriteBit(false); // VUI parameter present flag

    writer.End();

    // H264 PPS
    writer.WriteU(1, 24);
    writer.WriteU(0, 1);
    writer.WriteU(3, 2);
    writer.WriteU(8, 5);

    writer.WriteUe(0);
    writer.WriteUe(0);

    writer.WriteBit(context.h264_parameter_set.entropy_coding_mode_flag != 0);
    writer.WriteBit(false);
    writer.WriteUe(0);
    writer.WriteUe(context.h264_parameter_set.num_refidx_l0_default_active);
    writer.WriteUe(context.h264_parameter_set.num_refidx_l1_default_active);
    writer.WriteBit(context.h264_parameter_set.flags.weighted_pred.Value() != 0);
    writer.WriteU(static_cast<s32>(context.h264_parameter_set.weighted_bipred_idc.Value()), 2);
    s32 pic_init_qp = static_cast<s32>(context.h264_parameter_set.pic_init_qp_minus26.Value());
    writer.WriteSe(pic_init_qp);
    writer.WriteSe(0);
    s32 chroma_qp_index_offset =
        static_cast<s32>(context.h264_parameter_set.chroma_qp_index_offset.Value());

    writer.WriteSe(chroma_qp_index_offset);
    writer.WriteBit(context.h264_parameter_set.deblocking_filter_control_present_flag != 0);
    writer.WriteBit(context.h264_parameter_set.flags.constrained_intra_pred.Value() != 0);
    writer.WriteBit(context.h264_parameter_set.redundant_pic_cnt_present_flag != 0);
    writer.WriteBit(context.h264_parameter_set.transform_8x8_mode_flag != 0);

    writer.WriteBit(true);

    for (s32 index = 0; index < 6; index++) {
        writer.WriteBit(true);
        std::span<const u8> matrix{context.weight_scale};
        writer.WriteScalingList(matrix, index * 16, 16);
    }

    if (context.h264_parameter_set.transform_8x8_mode_flag) {
        for (s32 index = 0; index < 2; index++) {
            writer.WriteBit(true);
            std::span<const u8> matrix{context.weight_scale_8x8};
            writer.WriteScalingList(matrix, index * 64, 64);
        }
    }

    s32 chroma_qp_index_offset2 =
        static_cast<s32>(context.h264_parameter_set.second_chroma_qp_index_offset.Value());

    writer.WriteSe(chroma_qp_index_offset2);

    writer.End();

    const auto& encoded_header = writer.GetByteArray();
    frame.resize(encoded_header.size() + context.stream_len);
    std::memcpy(frame.data(), encoded_header.data(), encoded_header.size());

    host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
                                     frame.data() + encoded_header.size(), context.stream_len);

    return frame;
}

H264BitWriter::H264BitWriter() = default;

H264BitWriter::~H264BitWriter() = default;

void H264BitWriter::WriteU(s32 value, s32 value_sz) {
    WriteBits(value, value_sz);
}

void H264BitWriter::WriteSe(s32 value) {
    WriteExpGolombCodedInt(value);
}

void H264BitWriter::WriteUe(u32 value) {
    WriteExpGolombCodedUInt(value);
}

void H264BitWriter::End() {
    WriteBit(true);
    Flush();
}

void H264BitWriter::WriteBit(bool state) {
    WriteBits(state ? 1 : 0, 1);
}

void H264BitWriter::WriteScalingList(std::span<const u8> list, s32 start, s32 count) {
    std::vector<u8> scan(count);
    if (count == 16) {
        std::memcpy(scan.data(), zig_zag_scan.data(), scan.size());
    } else {
        std::memcpy(scan.data(), zig_zag_direct.data(), scan.size());
    }
    u8 last_scale = 8;

    for (s32 index = 0; index < count; index++) {
        const u8 value = list[start + scan[index]];
        const s32 delta_scale = static_cast<s32>(value - last_scale);

        WriteSe(delta_scale);

        last_scale = value;
    }
}

std::vector<u8>& H264BitWriter::GetByteArray() {
    return byte_array;
}

const std::vector<u8>& H264BitWriter::GetByteArray() const {
    return byte_array;
}

void H264BitWriter::WriteBits(s32 value, s32 bit_count) {
    s32 value_pos = 0;

    s32 remaining = bit_count;

    while (remaining > 0) {
        s32 copy_size = remaining;

        const s32 free_bits = GetFreeBufferBits();

        if (copy_size > free_bits) {
            copy_size = free_bits;
        }

        const s32 mask = (1 << copy_size) - 1;

        const s32 src_shift = (bit_count - value_pos) - copy_size;
        const s32 dst_shift = (buffer_size - buffer_pos) - copy_size;

        buffer |= ((value >> src_shift) & mask) << dst_shift;

        value_pos += copy_size;
        buffer_pos += copy_size;
        remaining -= copy_size;
    }
}

void H264BitWriter::WriteExpGolombCodedInt(s32 value) {
    const s32 sign = value <= 0 ? 0 : 1;
    if (value < 0) {
        value = -value;
    }
    value = (value << 1) - sign;
    WriteExpGolombCodedUInt(value);
}

void H264BitWriter::WriteExpGolombCodedUInt(u32 value) {
    const s32 size = 32 - std::countl_zero(value + 1);
    WriteBits(1, size);

    value -= (1U << (size - 1)) - 1;
    WriteBits(static_cast<s32>(value), size - 1);
}

s32 H264BitWriter::GetFreeBufferBits() {
    if (buffer_pos == buffer_size) {
        Flush();
    }

    return buffer_size - buffer_pos;
}

void H264BitWriter::Flush() {
    if (buffer_pos == 0) {
        return;
    }
    byte_array.push_back(static_cast<u8>(buffer));

    buffer = 0;
    buffer_pos = 0;
}
} // namespace Tegra::Decoder
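
WriteExpGolombCodedUInt above encodes a value v as (size - 1) zero bits, a marker 1 bit, and then the low (size - 1) bits of v + 1, where size is the bit length of v + 1. A standalone illustration of that encoding, not part of the commit; the ExpGolombUnsigned helper is a hypothetical name, and its output can be compared against the Exp-Golomb table in clause 9.1 of the H.264 specification:

// Sketch: unsigned Exp-Golomb coding, mirroring the scheme in WriteExpGolombCodedUInt.
#include <bit>
#include <cstdint>
#include <iostream>
#include <string>

std::string ExpGolombUnsigned(uint32_t value) {
    const int size = 32 - std::countl_zero(value + 1); // bit length of value + 1
    std::string bits(size - 1, '0');                   // leading zeros
    bits += '1';                                       // marker bit
    for (int i = size - 2; i >= 0; --i) {              // low (size - 1) bits of value + 1
        bits += ((value + 1) >> i) & 1 ? '1' : '0';
    }
    return bits;
}

int main() {
    // Expected: 0 -> "1", 1 -> "010", 2 -> "011", 3 -> "00100", 7 -> "0001000"
    for (uint32_t v : {0u, 1u, 2u, 3u, 7u}) {
        std::cout << v << " -> " << ExpGolombUnsigned(v) << '\n';
    }
    return 0;
}
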
@@ -1,177 +1,177 @@
// SPDX-FileCopyrightText: Ryujinx Team and Contributors
// SPDX-License-Identifier: MIT

#pragma once

#include <span>
#include <vector>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/host1x/nvdec_common.h"

namespace Tegra {

namespace Host1x {
class Host1x;
} // namespace Host1x

namespace Decoder {

class H264BitWriter {
public:
    H264BitWriter();
    ~H264BitWriter();

    /// The following Write methods are based on clause 9.1 in the H.264 specification.
    /// WriteSe and WriteUe write in the Exp-Golomb-coded syntax
    void WriteU(s32 value, s32 value_sz);
    void WriteSe(s32 value);
    void WriteUe(u32 value);

    /// Finalize the bitstream
    void End();

    /// Append a bit to the stream; the bit written matches the state parameter
    void WriteBit(bool state);

    /// Based on section 7.3.2.1.1.1 and Table 7-4 in the H.264 specification
    /// Writes the scaling matrices of the stream
    void WriteScalingList(std::span<const u8> list, s32 start, s32 count);

    /// Return the bitstream as a vector.
    [[nodiscard]] std::vector<u8>& GetByteArray();
    [[nodiscard]] const std::vector<u8>& GetByteArray() const;

private:
    void WriteBits(s32 value, s32 bit_count);
    void WriteExpGolombCodedInt(s32 value);
    void WriteExpGolombCodedUInt(u32 value);
    [[nodiscard]] s32 GetFreeBufferBits();
    void Flush();

    s32 buffer_size{8};

    s32 buffer{};
    s32 buffer_pos{};
    std::vector<u8> byte_array;
};

class H264 {
public:
    explicit H264(Host1x::Host1x& host1x);
    ~H264();

    /// Compose the H264 frame for FFmpeg decoding
    [[nodiscard]] const std::vector<u8>& ComposeFrame(
        const Host1x::NvdecCommon::NvdecRegisters& state, bool is_first_frame = false);

private:
    std::vector<u8> frame;
    Host1x::Host1x& host1x;

    struct H264ParameterSet {
        s32 log2_max_pic_order_cnt_lsb_minus4; ///< 0x00
        s32 delta_pic_order_always_zero_flag;  ///< 0x04
        s32 frame_mbs_only_flag;               ///< 0x08
        u32 pic_width_in_mbs;                  ///< 0x0C
        u32 frame_height_in_map_units;         ///< 0x10
        union {                                ///< 0x14
            BitField<0, 2, u32> tile_format;
            BitField<2, 3, u32> gob_height;
        };
        u32 entropy_coding_mode_flag;               ///< 0x18
        s32 pic_order_present_flag;                 ///< 0x1C
        s32 num_refidx_l0_default_active;           ///< 0x20
        s32 num_refidx_l1_default_active;           ///< 0x24
        s32 deblocking_filter_control_present_flag; ///< 0x28
        s32 redundant_pic_cnt_present_flag;         ///< 0x2C
        u32 transform_8x8_mode_flag;                ///< 0x30
        u32 pitch_luma;                             ///< 0x34
        u32 pitch_chroma;                           ///< 0x38
        u32 luma_top_offset;                        ///< 0x3C
        u32 luma_bot_offset;                        ///< 0x40
        u32 luma_frame_offset;                      ///< 0x44
        u32 chroma_top_offset;                      ///< 0x48
        u32 chroma_bot_offset;                      ///< 0x4C
        u32 chroma_frame_offset;                    ///< 0x50
        u32 hist_buffer_size;                       ///< 0x54
        union {                                     ///< 0x58
            union {
                BitField<0, 1, u64> mbaff_frame;
                BitField<1, 1, u64> direct_8x8_inference;
                BitField<2, 1, u64> weighted_pred;
                BitField<3, 1, u64> constrained_intra_pred;
                BitField<4, 1, u64> ref_pic;
                BitField<5, 1, u64> field_pic;
                BitField<6, 1, u64> bottom_field;
                BitField<7, 1, u64> second_field;
            } flags;
            BitField<8, 4, u64> log2_max_frame_num_minus4;
            BitField<12, 2, u64> chroma_format_idc;
            BitField<14, 2, u64> pic_order_cnt_type;
            BitField<16, 6, s64> pic_init_qp_minus26;
            BitField<22, 5, s64> chroma_qp_index_offset;
            BitField<27, 5, s64> second_chroma_qp_index_offset;
            BitField<32, 2, u64> weighted_bipred_idc;
            BitField<34, 7, u64> curr_pic_idx;
            BitField<41, 5, u64> curr_col_idx;
            BitField<46, 16, u64> frame_number;
            BitField<62, 1, u64> frame_surfaces;
            BitField<63, 1, u64> output_memory_layout;
        };
    };
    static_assert(sizeof(H264ParameterSet) == 0x60, "H264ParameterSet is an invalid size");

    struct H264DecoderContext {
        INSERT_PADDING_WORDS_NOINIT(18);       ///< 0x0000
        u32 stream_len;                        ///< 0x0048
        INSERT_PADDING_WORDS_NOINIT(3);        ///< 0x004C
        H264ParameterSet h264_parameter_set;   ///< 0x0058
        INSERT_PADDING_WORDS_NOINIT(66);       ///< 0x00B8
        std::array<u8, 0x60> weight_scale;     ///< 0x01C0
        std::array<u8, 0x80> weight_scale_8x8; ///< 0x0220
    };
    static_assert(sizeof(H264DecoderContext) == 0x2A0, "H264DecoderContext is an invalid size");

#define ASSERT_POSITION(field_name, position)                                                      \
    static_assert(offsetof(H264ParameterSet, field_name) == position,                              \
                  "Field " #field_name " has invalid position")

    ASSERT_POSITION(log2_max_pic_order_cnt_lsb_minus4, 0x00);
    ASSERT_POSITION(delta_pic_order_always_zero_flag, 0x04);
    ASSERT_POSITION(frame_mbs_only_flag, 0x08);
    ASSERT_POSITION(pic_width_in_mbs, 0x0C);
    ASSERT_POSITION(frame_height_in_map_units, 0x10);
    ASSERT_POSITION(tile_format, 0x14);
    ASSERT_POSITION(entropy_coding_mode_flag, 0x18);
    ASSERT_POSITION(pic_order_present_flag, 0x1C);
    ASSERT_POSITION(num_refidx_l0_default_active, 0x20);
    ASSERT_POSITION(num_refidx_l1_default_active, 0x24);
    ASSERT_POSITION(deblocking_filter_control_present_flag, 0x28);
    ASSERT_POSITION(redundant_pic_cnt_present_flag, 0x2C);
    ASSERT_POSITION(transform_8x8_mode_flag, 0x30);
    ASSERT_POSITION(pitch_luma, 0x34);
    ASSERT_POSITION(pitch_chroma, 0x38);
    ASSERT_POSITION(luma_top_offset, 0x3C);
    ASSERT_POSITION(luma_bot_offset, 0x40);
    ASSERT_POSITION(luma_frame_offset, 0x44);
    ASSERT_POSITION(chroma_top_offset, 0x48);
    ASSERT_POSITION(chroma_bot_offset, 0x4C);
    ASSERT_POSITION(chroma_frame_offset, 0x50);
    ASSERT_POSITION(hist_buffer_size, 0x54);
    ASSERT_POSITION(flags, 0x58);
#undef ASSERT_POSITION

#define ASSERT_POSITION(field_name, position)                                                      \
    static_assert(offsetof(H264DecoderContext, field_name) == position,                            \
                  "Field " #field_name " has invalid position")

    ASSERT_POSITION(stream_len, 0x48);
    ASSERT_POSITION(h264_parameter_set, 0x58);
    ASSERT_POSITION(weight_scale, 0x1C0);
#undef ASSERT_POSITION
};

} // namespace Decoder
} // namespace Tegra
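
The ASSERT_POSITION blocks above pin the H264ParameterSet and H264DecoderContext layouts to the hardware-defined byte offsets at compile time, so a refactor cannot silently shift a field. A stripped-down sketch of the same offsetof/static_assert idiom, not part of the commit; ExampleRegs and its fields are hypothetical:

// Sketch: lock a register-mapped struct's layout at compile time.
#include <cstddef>
#include <cstdint>

struct ExampleRegs {
    uint32_t stream_len; // expected at offset 0x00
    uint32_t flags;      // expected at offset 0x04
};

#define ASSERT_POSITION(field_name, position)                                  \
    static_assert(offsetof(ExampleRegs, field_name) == position,               \
                  "Field " #field_name " has invalid position")

ASSERT_POSITION(stream_len, 0x00);
ASSERT_POSITION(flags, 0x04);
#undef ASSERT_POSITION

int main() {
    return 0; // compiles only if the layout assumptions above hold
}
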
@@ -1,53 +1,53 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <vector>

#include "video_core/host1x/codecs/vp8.h"
#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"

namespace Tegra::Decoder {
VP8::VP8(Host1x::Host1x& host1x_) : host1x{host1x_} {}

VP8::~VP8() = default;

const std::vector<u8>& VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
    VP8PictureInfo info;
    host1x.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));

    const bool is_key_frame = info.key_frame == 1u;
    const auto bitstream_size = static_cast<size_t>(info.vld_buffer_size);
    const size_t header_size = is_key_frame ? 10u : 3u;
    frame.resize(header_size + bitstream_size);

    // Based on page 30 of the VP8 specification.
    // https://datatracker.ietf.org/doc/rfc6386/
    frame[0] = is_key_frame ? 0u : 1u;                      // 1-bit frame type (0: keyframe, 1: interframe)
    frame[0] |= static_cast<u8>((info.version & 7u) << 1u); // 3-bit version number
    frame[0] |= static_cast<u8>(1u << 4u);                  // 1-bit show_frame flag

    // The next 19 bits are the first partition size
    frame[0] |= static_cast<u8>((info.first_part_size & 7u) << 5u);
    frame[1] = static_cast<u8>((info.first_part_size & 0x7f8u) >> 3u);
    frame[2] = static_cast<u8>((info.first_part_size & 0x7f800u) >> 11u);

    if (is_key_frame) {
        frame[3] = 0x9du;
        frame[4] = 0x01u;
        frame[5] = 0x2au;
        // TODO(ameerj): Horizontal/Vertical Scale
        // 16 bits: (2 bits Horizontal Scale << 14) | Width (14 bits)
        frame[6] = static_cast<u8>(info.frame_width & 0xff);
        frame[7] = static_cast<u8>(((info.frame_width >> 8) & 0x3f));
        // 16 bits: (2 bits Vertical Scale << 14) | Height (14 bits)
        frame[8] = static_cast<u8>(info.frame_height & 0xff);
        frame[9] = static_cast<u8>(((info.frame_height >> 8) & 0x3f));
    }
    const u64 bitstream_offset = state.frame_bitstream_offset;
    host1x.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);

    return frame;
}
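
// Worked example of the frame-tag packing above (values are illustrative only): for a
// keyframe with version 0 and first_part_size = 0x1234, the three tag bytes become
//     frame[0] = 0 | (0 << 1) | (1 << 4) | ((0x1234 & 7) << 5) = 0x90
//     frame[1] = (0x1234 >> 3) & 0xff                          = 0x46
//     frame[2] = (0x1234 >> 11) & 0xff                         = 0x02
// followed by the fixed start code 0x9d 0x01 0x2a and the 14-bit width/height fields.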

} // namespace Tegra::Decoder
@@ -1,78 +1,78 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>
#include <vector>

#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/host1x/nvdec_common.h"

namespace Tegra {

namespace Host1x {
class Host1x;
} // namespace Host1x

namespace Decoder {

class VP8 {
public:
    explicit VP8(Host1x::Host1x& host1x);
    ~VP8();

    /// Compose the VP8 frame for FFmpeg decoding
    [[nodiscard]] const std::vector<u8>& ComposeFrame(
        const Host1x::NvdecCommon::NvdecRegisters& state);

private:
    std::vector<u8> frame;
    Host1x::Host1x& host1x;

    struct VP8PictureInfo {
        INSERT_PADDING_WORDS_NOINIT(14);
        u16 frame_width;  // actual frame width
        u16 frame_height; // actual frame height
        u8 key_frame;
        u8 version;
        union {
            u8 raw;
            BitField<0, 2, u8> tile_format;
            BitField<2, 3, u8> gob_height;
            BitField<5, 3, u8> reserverd_surface_format;
        };
        u8 error_conceal_on;  // 1: error conceal on; 0: off
        u32 first_part_size;  // the size of the first partition (frame header and mb header partition)
        u32 hist_buffer_size; // in units of 256
        u32 vld_buffer_size;  // in units of 1
        // Current frame buffers
        std::array<u32, 2> frame_stride; // [y_c]
        u32 luma_top_offset;             // offset of luma top field in units of 256
        u32 luma_bot_offset;             // offset of luma bottom field in units of 256
        u32 luma_frame_offset;           // offset of luma frame in units of 256
        u32 chroma_top_offset;           // offset of chroma top field in units of 256
        u32 chroma_bot_offset;           // offset of chroma bottom field in units of 256
        u32 chroma_frame_offset;         // offset of chroma frame in units of 256

        INSERT_PADDING_BYTES_NOINIT(0x1c); // NvdecDisplayParams

        // Decode picture buffer related
        s8 current_output_memory_layout;
        // output NV12/NV24 setting. index 0: golden; 1: altref; 2: last
        std::array<s8, 3> output_memory_layout;

        u8 segmentation_feature_data_update;
        INSERT_PADDING_BYTES_NOINIT(3);

        // ucode return result
        u32 result_value;
        std::array<u32, 8> partition_offset;
        INSERT_PADDING_WORDS_NOINIT(3);
    };
    static_assert(sizeof(VP8PictureInfo) == 0xc0, "PictureInfo is an invalid size");
};

} // namespace Decoder
} // namespace Tegra
File diff suppressed because it is too large
@@ -1,198 +1,198 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>
#include <vector>

#include "common/common_types.h"
#include "common/stream.h"
#include "video_core/host1x/codecs/vp9_types.h"
#include "video_core/host1x/nvdec_common.h"

namespace Tegra {

namespace Host1x {
class Host1x;
} // namespace Host1x

namespace Decoder {

/// The VpxRangeEncoder and VpxBitStreamWriter classes are used to compose the
/// VP9 header bitstreams.

class VpxRangeEncoder {
public:
    VpxRangeEncoder();
    ~VpxRangeEncoder();

    VpxRangeEncoder(const VpxRangeEncoder&) = delete;
    VpxRangeEncoder& operator=(const VpxRangeEncoder&) = delete;

    VpxRangeEncoder(VpxRangeEncoder&&) = default;
    VpxRangeEncoder& operator=(VpxRangeEncoder&&) = default;

    /// Writes the rightmost value_size bits from value into the stream
    void Write(s32 value, s32 value_size);

    /// Writes a single bit with half probability
    void Write(bool bit);

    /// Writes a bit to the base_stream encoded with probability
    void Write(bool bit, s32 probability);

    /// Signal the end of the bitstream
    void End();

    [[nodiscard]] std::vector<u8>& GetBuffer() {
        return base_stream.GetBuffer();
    }

    [[nodiscard]] const std::vector<u8>& GetBuffer() const {
        return base_stream.GetBuffer();
    }

private:
    u8 PeekByte();
    Common::Stream base_stream{};
    u32 low_value{};
    u32 range{0xff};
    s32 count{-24};
    s32 half_probability{128};
};
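
// Illustrative usage only (not called like this anywhere in this change): the compressed
// header writer drives the encoder with a mix of probability-coded bits and raw literals,
// then finalizes the stream before reading the bytes back:
//     VpxRangeEncoder enc;
//     enc.Write(false, 252); // one bit, coded against an 8-bit probability
//     enc.Write(0x17, 5);    // the rightmost 5 bits of a literal value
//     enc.End();             // flush so GetBuffer() holds the finished bitstream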

class VpxBitStreamWriter {
public:
    VpxBitStreamWriter();
    ~VpxBitStreamWriter();

    VpxBitStreamWriter(const VpxBitStreamWriter&) = delete;
    VpxBitStreamWriter& operator=(const VpxBitStreamWriter&) = delete;

    VpxBitStreamWriter(VpxBitStreamWriter&&) = default;
    VpxBitStreamWriter& operator=(VpxBitStreamWriter&&) = default;

    /// Write an unsigned integer value
    void WriteU(u32 value, u32 value_size);

    /// Write a signed integer value
    void WriteS(s32 value, u32 value_size);

    /// Based on 6.2.10 of the VP9 spec, writes a delta coded value
    void WriteDeltaQ(u32 value);

    /// Write a single bit.
    void WriteBit(bool state);

    /// Pushes the current buffer into byte_array and resets the buffer
    void Flush();

    /// Returns byte_array
    [[nodiscard]] std::vector<u8>& GetByteArray();

    /// Returns const byte_array
    [[nodiscard]] const std::vector<u8>& GetByteArray() const;

private:
    /// Write bit_count bits from value into buffer
    void WriteBits(u32 value, u32 bit_count);

    /// Gets the next available position in the buffer, invokes Flush() if the buffer is full
    s32 GetFreeBufferBits();

    s32 buffer_size{8};

    s32 buffer{};
    s32 buffer_pos{};
    std::vector<u8> byte_array;
};
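
// Note on WriteDeltaQ (an assumption drawn from the cited specification, not from code in
// this diff): the delta_q() syntax in section 6.2.10 writes a zero delta as a single 0 bit,
// and a non-zero delta as a 1 bit followed by a 4-bit magnitude and a sign bit.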

class VP9 {
public:
    explicit VP9(Host1x::Host1x& host1x);
    ~VP9();

    VP9(const VP9&) = delete;
    VP9& operator=(const VP9&) = delete;

    VP9(VP9&&) = default;
    VP9& operator=(VP9&&) = delete;

    /// Composes the VP9 frame from the GPU state information.
    /// Based on the official VP9 spec documentation
    void ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state);

    /// Returns true if the most recent frame was a hidden frame.
    [[nodiscard]] bool WasFrameHidden() const {
        return !current_frame_info.show_frame;
    }

    /// Returns a const reference to the composed frame data.
    [[nodiscard]] const std::vector<u8>& GetFrameBytes() const {
        return frame;
    }

private:
    /// Generates compressed header probability updates in the bitstream writer
    template <typename T, std::size_t N>
    void WriteProbabilityUpdate(VpxRangeEncoder& writer, const std::array<T, N>& new_prob,
                                const std::array<T, N>& old_prob);

    /// Generates compressed header probability updates in the bitstream writer
    /// If probs are not equal, WriteProbabilityDelta is invoked
    void WriteProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);

    /// Generates compressed header probability deltas in the bitstream writer
    void WriteProbabilityDelta(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);

    /// Inverse of 6.3.4 Decode term subexp
    void EncodeTermSubExp(VpxRangeEncoder& writer, s32 value);

    /// Writes if the value is less than the test value
    bool WriteLessThan(VpxRangeEncoder& writer, s32 value, s32 test);

    /// Writes probability updates for the Coef probabilities
    void WriteCoefProbabilityUpdate(VpxRangeEncoder& writer, s32 tx_mode,
                                    const std::array<u8, 1728>& new_prob,
                                    const std::array<u8, 1728>& old_prob);

    /// Write probabilities for 4-byte aligned structures
    template <typename T, std::size_t N>
    void WriteProbabilityUpdateAligned4(VpxRangeEncoder& writer, const std::array<T, N>& new_prob,
                                        const std::array<T, N>& old_prob);

    /// Write motion vector probability updates. 6.3.17 in the spec
    void WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);

    /// Returns VP9 information from NVDEC provided offset and size
    [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(
        const Host1x::NvdecCommon::NvdecRegisters& state);

    /// Read and convert NVDEC provided entropy probs to Vp9EntropyProbs struct
    void InsertEntropy(u64 offset, Vp9EntropyProbs& dst);

    /// Returns frame to be decoded after buffering
    [[nodiscard]] Vp9FrameContainer GetCurrentFrame(
        const Host1x::NvdecCommon::NvdecRegisters& state);

    /// Use NVDEC provided information to compose the headers for the current frame
    [[nodiscard]] std::vector<u8> ComposeCompressedHeader();
    [[nodiscard]] VpxBitStreamWriter ComposeUncompressedHeader();

    Host1x::Host1x& host1x;
    std::vector<u8> frame;

    std::array<s8, 4> loop_filter_ref_deltas{};
    std::array<s8, 2> loop_filter_mode_deltas{};

    Vp9FrameContainer next_frame{};
    std::array<Vp9EntropyProbs, 4> frame_ctxs{};
    bool swap_ref_indices{};

    Vp9PictureInfo current_frame_info{};
    Vp9EntropyProbs prev_frame_probs{};
};
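
// How the pieces above fit together for a single NVDEC submission (a summary of the declared
// interface; the exact call order is an assumption, since vp9.cpp is not shown in this diff):
// ComposeFrame() reads the register state via GetVp9PictureInfo() and InsertEntropy(), emits
// the uncompressed header with a VpxBitStreamWriter and the compressed header with a
// VpxRangeEncoder, and concatenates both with the bitstream into `frame`, which callers then
// fetch through GetFrameBytes() after checking WasFrameHidden().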

} // namespace Decoder
} // namespace Tegra
@@ -1,305 +1,305 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>
#include <vector>
#include "common/common_funcs.h"
#include "common/common_types.h"

namespace Tegra {

namespace Decoder {
struct Vp9FrameDimensions {
    s16 width;
    s16 height;
    s16 luma_pitch;
    s16 chroma_pitch;
};
static_assert(sizeof(Vp9FrameDimensions) == 0x8, "Vp9 Vp9FrameDimensions is an invalid size");

enum class FrameFlags : u32 {
    IsKeyFrame = 1 << 0,
    LastFrameIsKeyFrame = 1 << 1,
    FrameSizeChanged = 1 << 2,
    ErrorResilientMode = 1 << 3,
    LastShowFrame = 1 << 4,
    IntraOnly = 1 << 5,
};
DECLARE_ENUM_FLAG_OPERATORS(FrameFlags)

enum class TxSize {
    Tx4x4 = 0,   // 4x4 transform
    Tx8x8 = 1,   // 8x8 transform
    Tx16x16 = 2, // 16x16 transform
    Tx32x32 = 3, // 32x32 transform
    TxSizes = 4
};

enum class TxMode {
    Only4X4 = 0,      // Only 4x4 transform used
    Allow8X8 = 1,     // Allow block transform size up to 8x8
    Allow16X16 = 2,   // Allow block transform size up to 16x16
    Allow32X32 = 3,   // Allow block transform size up to 32x32
    TxModeSelect = 4, // Transform specified for each block
    TxModes = 5
};

struct Segmentation {
    u8 enabled;
    u8 update_map;
    u8 temporal_update;
    u8 abs_delta;
    std::array<u32, 8> feature_mask;
    std::array<std::array<s16, 4>, 8> feature_data;
};
static_assert(sizeof(Segmentation) == 0x64, "Segmentation is an invalid size");

struct LoopFilter {
    u8 mode_ref_delta_enabled;
    std::array<s8, 4> ref_deltas;
    std::array<s8, 2> mode_deltas;
};
static_assert(sizeof(LoopFilter) == 0x7, "LoopFilter is an invalid size");

struct Vp9EntropyProbs {
    std::array<u8, 36> y_mode_prob;           ///< 0x0000
    std::array<u8, 64> partition_prob;        ///< 0x0024
    std::array<u8, 1728> coef_probs;          ///< 0x0064
    std::array<u8, 8> switchable_interp_prob; ///< 0x0724
    std::array<u8, 28> inter_mode_prob;       ///< 0x072C
    std::array<u8, 4> intra_inter_prob;       ///< 0x0748
    std::array<u8, 5> comp_inter_prob;        ///< 0x074C
    std::array<u8, 10> single_ref_prob;       ///< 0x0751
    std::array<u8, 5> comp_ref_prob;          ///< 0x075B
    std::array<u8, 6> tx_32x32_prob;          ///< 0x0760
    std::array<u8, 4> tx_16x16_prob;          ///< 0x0766
    std::array<u8, 2> tx_8x8_prob;            ///< 0x076A
    std::array<u8, 3> skip_probs;             ///< 0x076C
    std::array<u8, 3> joints;                 ///< 0x076F
    std::array<u8, 2> sign;                   ///< 0x0772
    std::array<u8, 20> classes;               ///< 0x0774
    std::array<u8, 2> class_0;                ///< 0x0788
    std::array<u8, 20> prob_bits;             ///< 0x078A
    std::array<u8, 12> class_0_fr;            ///< 0x079E
    std::array<u8, 6> fr;                     ///< 0x07AA
    std::array<u8, 2> class_0_hp;             ///< 0x07B0
    std::array<u8, 2> high_precision;         ///< 0x07B2
};
static_assert(sizeof(Vp9EntropyProbs) == 0x7B4, "Vp9EntropyProbs is an invalid size");

struct Vp9PictureInfo {
    u32 bitstream_size;
    std::array<u64, 4> frame_offsets;
    std::array<s8, 4> ref_frame_sign_bias;
    s32 base_q_index;
    s32 y_dc_delta_q;
    s32 uv_dc_delta_q;
    s32 uv_ac_delta_q;
    s32 transform_mode;
    s32 interp_filter;
    s32 reference_mode;
    s32 log2_tile_cols;
    s32 log2_tile_rows;
    std::array<s8, 4> ref_deltas;
    std::array<s8, 2> mode_deltas;
    Vp9EntropyProbs entropy;
    Vp9FrameDimensions frame_size;
    u8 first_level;
    u8 sharpness_level;
    bool is_key_frame;
    bool intra_only;
    bool last_frame_was_key;
    bool error_resilient_mode;
    bool last_frame_shown;
    bool show_frame;
    bool lossless;
    bool allow_high_precision_mv;
    bool segment_enabled;
    bool mode_ref_delta_enabled;
};

struct Vp9FrameContainer {
    Vp9PictureInfo info{};
    std::vector<u8> bit_stream;
};

struct PictureInfo {
    INSERT_PADDING_WORDS_NOINIT(12);       ///< 0x00
    u32 bitstream_size;                    ///< 0x30
    INSERT_PADDING_WORDS_NOINIT(5);        ///< 0x34
    Vp9FrameDimensions last_frame_size;    ///< 0x48
    Vp9FrameDimensions golden_frame_size;  ///< 0x50
    Vp9FrameDimensions alt_frame_size;     ///< 0x58
    Vp9FrameDimensions current_frame_size; ///< 0x60
    FrameFlags vp9_flags;                  ///< 0x68
    std::array<s8, 4> ref_frame_sign_bias; ///< 0x6C
    u8 first_level;                        ///< 0x70
    u8 sharpness_level;                    ///< 0x71
    u8 base_q_index;                       ///< 0x72
    u8 y_dc_delta_q;                       ///< 0x73
    u8 uv_ac_delta_q;                      ///< 0x74
    u8 uv_dc_delta_q;                      ///< 0x75
    u8 lossless;                           ///< 0x76
    u8 tx_mode;                            ///< 0x77
    u8 allow_high_precision_mv;            ///< 0x78
    u8 interp_filter;                      ///< 0x79
    u8 reference_mode;                     ///< 0x7A
    INSERT_PADDING_BYTES_NOINIT(3);        ///< 0x7B
    u8 log2_tile_cols;                     ///< 0x7E
    u8 log2_tile_rows;                     ///< 0x7F
    Segmentation segmentation;             ///< 0x80
    LoopFilter loop_filter;                ///< 0xE4
    INSERT_PADDING_BYTES_NOINIT(21);       ///< 0xEB

    [[nodiscard]] Vp9PictureInfo Convert() const {
        return {
            .bitstream_size = bitstream_size,
            .frame_offsets{},
            .ref_frame_sign_bias = ref_frame_sign_bias,
            .base_q_index = base_q_index,
            .y_dc_delta_q = y_dc_delta_q,
            .uv_dc_delta_q = uv_dc_delta_q,
            .uv_ac_delta_q = uv_ac_delta_q,
            .transform_mode = tx_mode,
            .interp_filter = interp_filter,
            .reference_mode = reference_mode,
            .log2_tile_cols = log2_tile_cols,
            .log2_tile_rows = log2_tile_rows,
            .ref_deltas = loop_filter.ref_deltas,
            .mode_deltas = loop_filter.mode_deltas,
            .entropy{},
            .frame_size = current_frame_size,
            .first_level = first_level,
            .sharpness_level = sharpness_level,
            .is_key_frame = True(vp9_flags & FrameFlags::IsKeyFrame),
            .intra_only = True(vp9_flags & FrameFlags::IntraOnly),
            .last_frame_was_key = True(vp9_flags & FrameFlags::LastFrameIsKeyFrame),
            .error_resilient_mode = True(vp9_flags & FrameFlags::ErrorResilientMode),
            .last_frame_shown = True(vp9_flags & FrameFlags::LastShowFrame),
            .show_frame = true,
            .lossless = lossless != 0,
            .allow_high_precision_mv = allow_high_precision_mv != 0,
            .segment_enabled = segmentation.enabled != 0,
            .mode_ref_delta_enabled = loop_filter.mode_ref_delta_enabled != 0,
        };
    }
};
static_assert(sizeof(PictureInfo) == 0x100, "PictureInfo is an invalid size");

struct EntropyProbs {
    INSERT_PADDING_BYTES_NOINIT(1024);                 ///< 0x0000
    std::array<u8, 28> inter_mode_prob;                ///< 0x0400
    std::array<u8, 4> intra_inter_prob;                ///< 0x041C
    INSERT_PADDING_BYTES_NOINIT(80);                   ///< 0x0420
    std::array<u8, 2> tx_8x8_prob;                     ///< 0x0470
    std::array<u8, 4> tx_16x16_prob;                   ///< 0x0472
    std::array<u8, 6> tx_32x32_prob;                   ///< 0x0476
    std::array<u8, 4> y_mode_prob_e8;                  ///< 0x047C
    std::array<std::array<u8, 8>, 4> y_mode_prob_e0e7; ///< 0x0480
    INSERT_PADDING_BYTES_NOINIT(64);                   ///< 0x04A0
    std::array<u8, 64> partition_prob;                 ///< 0x04E0
    INSERT_PADDING_BYTES_NOINIT(10);                   ///< 0x0520
    std::array<u8, 8> switchable_interp_prob;          ///< 0x052A
    std::array<u8, 5> comp_inter_prob;                 ///< 0x0532
    std::array<u8, 3> skip_probs;                      ///< 0x0537
    INSERT_PADDING_BYTES_NOINIT(1);                    ///< 0x053A
    std::array<u8, 3> joints;                          ///< 0x053B
    std::array<u8, 2> sign;                            ///< 0x053E
    std::array<u8, 2> class_0;                         ///< 0x0540
    std::array<u8, 6> fr;                              ///< 0x0542
    std::array<u8, 2> class_0_hp;                      ///< 0x0548
    std::array<u8, 2> high_precision;                  ///< 0x054A
    std::array<u8, 20> classes;                        ///< 0x054C
    std::array<u8, 12> class_0_fr;                     ///< 0x0560
    std::array<u8, 20> pred_bits;                      ///< 0x056C
    std::array<u8, 10> single_ref_prob;                ///< 0x0580
    std::array<u8, 5> comp_ref_prob;                   ///< 0x058A
    INSERT_PADDING_BYTES_NOINIT(17);                   ///< 0x058F
    std::array<u8, 2304> coef_probs;                   ///< 0x05A0

    void Convert(Vp9EntropyProbs& fc) {
        fc.inter_mode_prob = inter_mode_prob;
        fc.intra_inter_prob = intra_inter_prob;
        fc.tx_8x8_prob = tx_8x8_prob;
        fc.tx_16x16_prob = tx_16x16_prob;
        fc.tx_32x32_prob = tx_32x32_prob;

        for (std::size_t i = 0; i < 4; i++) {
            for (std::size_t j = 0; j < 9; j++) {
                fc.y_mode_prob[j + 9 * i] = j < 8 ? y_mode_prob_e0e7[i][j] : y_mode_prob_e8[i];
            }
        }

        fc.partition_prob = partition_prob;
        fc.switchable_interp_prob = switchable_interp_prob;
        fc.comp_inter_prob = comp_inter_prob;
        fc.skip_probs = skip_probs;
        fc.joints = joints;
        fc.sign = sign;
        fc.class_0 = class_0;
        fc.fr = fr;
        fc.class_0_hp = class_0_hp;
        fc.high_precision = high_precision;
        fc.classes = classes;
        fc.class_0_fr = class_0_fr;
        fc.prob_bits = pred_bits;
        fc.single_ref_prob = single_ref_prob;
        fc.comp_ref_prob = comp_ref_prob;

        // Skip the 4th element as it goes unused
        for (std::size_t i = 0; i < coef_probs.size(); i += 4) {
            const std::size_t j = i - i / 4;
            fc.coef_probs[j] = coef_probs[i];
            fc.coef_probs[j + 1] = coef_probs[i + 1];
            fc.coef_probs[j + 2] = coef_probs[i + 2];
        }
    }
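
    // Index mapping used by the loop above: every group of four NVDEC bytes contributes
    // three bytes to fc.coef_probs, so i = 0 fills j = 0..2, i = 4 fills j = 3..5, and the
    // final group at i = 2300 fills j = 1725..1727, shrinking 2304 source bytes to the
    // 1728 that are actually used.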
};
static_assert(sizeof(EntropyProbs) == 0xEA0, "EntropyProbs is an invalid size");

enum class Ref { Last, Golden, AltRef };

struct RefPoolElement {
    s64 frame{};
    Ref ref{};
    bool refresh{};
};

#define ASSERT_POSITION(field_name, position) \
    static_assert(offsetof(Vp9EntropyProbs, field_name) == position, \
                  "Field " #field_name " has invalid position")

ASSERT_POSITION(partition_prob, 0x0024);
ASSERT_POSITION(switchable_interp_prob, 0x0724);
ASSERT_POSITION(sign, 0x0772);
ASSERT_POSITION(class_0_fr, 0x079E);
ASSERT_POSITION(high_precision, 0x07B2);
#undef ASSERT_POSITION

#define ASSERT_POSITION(field_name, position) \
    static_assert(offsetof(PictureInfo, field_name) == position, \
                  "Field " #field_name " has invalid position")

ASSERT_POSITION(bitstream_size, 0x30);
ASSERT_POSITION(last_frame_size, 0x48);
ASSERT_POSITION(first_level, 0x70);
ASSERT_POSITION(segmentation, 0x80);
ASSERT_POSITION(loop_filter, 0xE4);
#undef ASSERT_POSITION

#define ASSERT_POSITION(field_name, position) \
    static_assert(offsetof(EntropyProbs, field_name) == position, \
                  "Field " #field_name " has invalid position")

ASSERT_POSITION(inter_mode_prob, 0x400);
ASSERT_POSITION(tx_8x8_prob, 0x470);
ASSERT_POSITION(partition_prob, 0x4E0);
ASSERT_POSITION(class_0, 0x540);
ASSERT_POSITION(class_0_fr, 0x560);
ASSERT_POSITION(coef_probs, 0x5A0);
#undef ASSERT_POSITION

}; // namespace Decoder
}; // namespace Tegra
@@ -1,33 +1,33 @@
// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#include "common/assert.h"
#include "video_core/host1x/control.h"
#include "video_core/host1x/host1x.h"

namespace Tegra::Host1x {

Control::Control(Host1x& host1x_) : host1x(host1x_) {}

Control::~Control() = default;

void Control::ProcessMethod(Method method, u32 argument) {
    switch (method) {
    case Method::LoadSyncptPayload32:
        syncpoint_value = argument;
        break;
    case Method::WaitSyncpt:
    case Method::WaitSyncpt32:
        Execute(argument);
        break;
    default:
        UNIMPLEMENTED_MSG("Control method 0x{:X}", static_cast<u32>(method));
        break;
    }
}

void Control::Execute(u32 data) {
    host1x.GetSyncpointManager().WaitHost(data, syncpoint_value);
}
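
// Expected command sequence (inferred from the handlers above rather than from separate
// documentation): a LoadSyncptPayload32 write stores the target payload, and the following
// WaitSyncpt/WaitSyncpt32 write carries the syncpoint index, so Execute() blocks the host
// until that syncpoint reaches the stored payload value.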

} // namespace Tegra::Host1x
@@ -1,40 +1,40 @@
// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
// SPDX-License-Identifier: GPL-3.0-or-later

#pragma once

#include "common/common_types.h"

namespace Tegra {

namespace Host1x {

class Host1x;
class Nvdec;

class Control {
public:
    enum class Method : u32 {
        WaitSyncpt = 0x8,
        LoadSyncptPayload32 = 0x4e,
        WaitSyncpt32 = 0x50,
    };

    explicit Control(Host1x& host1x);
    ~Control();

    /// Writes the method into the state and invokes Execute() when a wait method is encountered
    void ProcessMethod(Method method, u32 argument);

private:
    /// For Host1x, Execute() waits on the syncpoint payload previously written into the state
    void Execute(u32 data);

    u32 syncpoint_value{};
    Host1x& host1x;
};

} // namespace Host1x

} // namespace Tegra
@@ -1,17 +1,17 @@
// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#include "core/core.h"
#include "video_core/host1x/host1x.h"

namespace Tegra {

namespace Host1x {

Host1x::Host1x(Core::System& system_)
    : system{system_}, syncpoint_manager{}, memory_manager{system, 32, 12},
      allocator{std::make_unique<Common::FlatAllocator<u32, 0, 32>>(1 << 12)} {}

} // namespace Host1x

} // namespace Tegra
@@ -1,57 +1,57 @@
// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#pragma once

#include "common/common_types.h"

#include "common/address_space.h"
#include "video_core/host1x/syncpoint_manager.h"
#include "video_core/memory_manager.h"

namespace Core {
class System;
} // namespace Core

namespace Tegra {

namespace Host1x {

class Host1x {
public:
    explicit Host1x(Core::System& system);

    SyncpointManager& GetSyncpointManager() {
        return syncpoint_manager;
    }

    const SyncpointManager& GetSyncpointManager() const {
        return syncpoint_manager;
    }

    Tegra::MemoryManager& MemoryManager() {
        return memory_manager;
    }

    const Tegra::MemoryManager& MemoryManager() const {
        return memory_manager;
    }

    Common::FlatAllocator<u32, 0, 32>& Allocator() {
        return *allocator;
    }

    const Common::FlatAllocator<u32, 0, 32>& Allocator() const {
        return *allocator;
    }

private:
    Core::System& system;
    SyncpointManager syncpoint_manager;
    Tegra::MemoryManager memory_manager;
    std::unique_ptr<Common::FlatAllocator<u32, 0, 32>> allocator;
};

} // namespace Host1x

} // namespace Tegra
@@ -1,48 +1,48 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/assert.h"
#include "video_core/host1x/host1x.h"
#include "video_core/host1x/nvdec.h"

namespace Tegra::Host1x {

#define NVDEC_REG_INDEX(field_name)                                                                \
    (offsetof(NvdecCommon::NvdecRegisters, field_name) / sizeof(u64))

Nvdec::Nvdec(Host1x& host1x_)
    : host1x(host1x_), state{}, codec(std::make_unique<Codec>(host1x, state)) {}

Nvdec::~Nvdec() = default;

void Nvdec::ProcessMethod(u32 method, u32 argument) {
    state.reg_array[method] = static_cast<u64>(argument) << 8;

    switch (method) {
    case NVDEC_REG_INDEX(set_codec_id):
        codec->SetTargetCodec(static_cast<NvdecCommon::VideoCodec>(argument));
        break;
    case NVDEC_REG_INDEX(execute):
        Execute();
        break;
    }
}

AVFramePtr Nvdec::GetFrame() {
    return codec->GetCurrentFrame();
}

void Nvdec::Execute() {
    switch (codec->GetCurrentCodec()) {
    case NvdecCommon::VideoCodec::H264:
    case NvdecCommon::VideoCodec::VP8:
    case NvdecCommon::VideoCodec::VP9:
        codec->Decode();
        break;
    default:
        UNIMPLEMENTED_MSG("Codec {}", codec->GetCurrentCodecName());
        break;
    }
}

} // namespace Tegra::Host1x
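
ProcessMethod() shifts every argument left by 8 bits before storing it, so command-list arguments are 256-byte-granular offsets and reg_array holds full byte addresses; a tiny arithmetic sketch with an illustrative value:

// Illustrative only: argument 0x123456 is stored as the byte address 0x12345600.
const u32 argument = 0x123456;
const u64 stored_value = static_cast<u64>(argument) << 8; // == 0x12345600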
@@ -1,39 +1,39 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <memory>
#include <vector>
#include "common/common_types.h"
#include "video_core/host1x/codecs/codec.h"

namespace Tegra {

namespace Host1x {

class Host1x;

class Nvdec {
public:
    explicit Nvdec(Host1x& host1x);
    ~Nvdec();

    /// Writes the method into the state, invoking Execute() when the execute register is written.
    void ProcessMethod(u32 method, u32 argument);

    /// Returns the most recently decoded frame.
    [[nodiscard]] AVFramePtr GetFrame();

private:
    /// Invokes the codec to decode a frame.
    void Execute();

    Host1x& host1x;
    NvdecCommon::NvdecRegisters state;
    std::unique_ptr<Codec> codec;
};

} // namespace Host1x

} // namespace Tegra
@@ -1,97 +1,97 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"

namespace Tegra::Host1x::NvdecCommon {

enum class VideoCodec : u64 {
    None = 0x0,
    H264 = 0x3,
    VP8 = 0x5,
    H265 = 0x7,
    VP9 = 0x9,
};

// NVDEC should use a 32-bit address space, but is mapped to 64-bit;
// doubling the sizes here compensates for that.
struct NvdecRegisters {
    static constexpr std::size_t NUM_REGS = 0x178;

    union {
        struct {
            INSERT_PADDING_WORDS_NOINIT(256);          ///< 0x0000
            VideoCodec set_codec_id;                   ///< 0x0400
            INSERT_PADDING_WORDS_NOINIT(126);          ///< 0x0408
            u64 execute;                               ///< 0x0600
            INSERT_PADDING_WORDS_NOINIT(126);          ///< 0x0608
            struct {                                   ///< 0x0800
                union {
                    BitField<0, 3, VideoCodec> codec;
                    BitField<4, 1, u64> gp_timer_on;
                    BitField<13, 1, u64> mb_timer_on;
                    BitField<14, 1, u64> intra_frame_pslc;
                    BitField<17, 1, u64> all_intra_frame;
                };
            } control_params;
            u64 picture_info_offset;                   ///< 0x0808
            u64 frame_bitstream_offset;                ///< 0x0810
            u64 frame_number;                          ///< 0x0818
            u64 h264_slice_data_offsets;               ///< 0x0820
            u64 h264_mv_dump_offset;                   ///< 0x0828
            INSERT_PADDING_WORDS_NOINIT(6);            ///< 0x0830
            u64 frame_stats_offset;                    ///< 0x0848
            u64 h264_last_surface_luma_offset;         ///< 0x0850
            u64 h264_last_surface_chroma_offset;       ///< 0x0858
            std::array<u64, 17> surface_luma_offset;   ///< 0x0860
            std::array<u64, 17> surface_chroma_offset; ///< 0x08E8
            INSERT_PADDING_WORDS_NOINIT(68);           ///< 0x0970
            u64 vp8_prob_data_offset;                  ///< 0x0A80
            u64 vp8_header_partition_buf_offset;       ///< 0x0A88
            INSERT_PADDING_WORDS_NOINIT(60);           ///< 0x0A90
            u64 vp9_entropy_probs_offset;              ///< 0x0B80
            u64 vp9_backward_updates_offset;           ///< 0x0B88
            u64 vp9_last_frame_segmap_offset;          ///< 0x0B90
            u64 vp9_curr_frame_segmap_offset;          ///< 0x0B98
            INSERT_PADDING_WORDS_NOINIT(2);            ///< 0x0BA0
            u64 vp9_last_frame_mvs_offset;             ///< 0x0BA8
            u64 vp9_curr_frame_mvs_offset;             ///< 0x0BB0
            INSERT_PADDING_WORDS_NOINIT(2);            ///< 0x0BB8
        };
        std::array<u64, NUM_REGS> reg_array;
    };
};
static_assert(sizeof(NvdecRegisters) == (0xBC0), "NvdecRegisters is incorrect size");

#define ASSERT_REG_POSITION(field_name, position)                                                  \
    static_assert(offsetof(NvdecRegisters, field_name) == position * sizeof(u64),                  \
                  "Field " #field_name " has invalid position")

ASSERT_REG_POSITION(set_codec_id, 0x80);
ASSERT_REG_POSITION(execute, 0xC0);
ASSERT_REG_POSITION(control_params, 0x100);
ASSERT_REG_POSITION(picture_info_offset, 0x101);
ASSERT_REG_POSITION(frame_bitstream_offset, 0x102);
ASSERT_REG_POSITION(frame_number, 0x103);
ASSERT_REG_POSITION(h264_slice_data_offsets, 0x104);
ASSERT_REG_POSITION(frame_stats_offset, 0x109);
ASSERT_REG_POSITION(h264_last_surface_luma_offset, 0x10A);
ASSERT_REG_POSITION(h264_last_surface_chroma_offset, 0x10B);
ASSERT_REG_POSITION(surface_luma_offset, 0x10C);
ASSERT_REG_POSITION(surface_chroma_offset, 0x11D);
ASSERT_REG_POSITION(vp8_prob_data_offset, 0x150);
ASSERT_REG_POSITION(vp8_header_partition_buf_offset, 0x151);
ASSERT_REG_POSITION(vp9_entropy_probs_offset, 0x170);
ASSERT_REG_POSITION(vp9_backward_updates_offset, 0x171);
ASSERT_REG_POSITION(vp9_last_frame_segmap_offset, 0x172);
ASSERT_REG_POSITION(vp9_curr_frame_segmap_offset, 0x173);
ASSERT_REG_POSITION(vp9_last_frame_mvs_offset, 0x175);
ASSERT_REG_POSITION(vp9_curr_frame_mvs_offset, 0x176);

#undef ASSERT_REG_POSITION

} // namespace Tegra::Host1x::NvdecCommon
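
Since the register block is addressed in u64 slots, a method index is simply the field offset divided by sizeof(u64); an illustrative compile-time check (not part of the commit) that mirrors the ASSERT_REG_POSITION entries above:

// Illustrative only: set_codec_id and execute sit at method indices 0x80 and 0xC0.
static_assert(offsetof(Tegra::Host1x::NvdecCommon::NvdecRegisters, set_codec_id) / sizeof(u64) == 0x80);
static_assert(offsetof(Tegra::Host1x::NvdecCommon::NvdecRegisters, execute) / sizeof(u64) == 0xC0);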
@@ -1,50 +1,50 @@
// SPDX-FileCopyrightText: Ryujinx Team and Contributors
// SPDX-License-Identifier: MIT

#include <algorithm>
#include "sync_manager.h"
#include "video_core/host1x/host1x.h"
#include "video_core/host1x/syncpoint_manager.h"

namespace Tegra {
namespace Host1x {

SyncptIncrManager::SyncptIncrManager(Host1x& host1x_) : host1x(host1x_) {}
SyncptIncrManager::~SyncptIncrManager() = default;

void SyncptIncrManager::Increment(u32 id) {
    increments.emplace_back(0, 0, id, true);
    IncrementAllDone();
}

u32 SyncptIncrManager::IncrementWhenDone(u32 class_id, u32 id) {
    const u32 handle = current_id++;
    increments.emplace_back(handle, class_id, id);
    return handle;
}

void SyncptIncrManager::SignalDone(u32 handle) {
    const auto done_incr =
        std::find_if(increments.begin(), increments.end(),
                     [handle](const SyncptIncr& incr) { return incr.id == handle; });
    if (done_incr != increments.cend()) {
        done_incr->complete = true;
    }
    IncrementAllDone();
}

void SyncptIncrManager::IncrementAllDone() {
    std::size_t done_count = 0;
    for (; done_count < increments.size(); ++done_count) {
        if (!increments[done_count].complete) {
            break;
        }
        auto& syncpoint_manager = host1x.GetSyncpointManager();
        syncpoint_manager.IncrementGuest(increments[done_count].syncpt_id);
        syncpoint_manager.IncrementHost(increments[done_count].syncpt_id);
    }
    increments.erase(increments.begin(), increments.begin() + done_count);
}

} // namespace Host1x
} // namespace Tegra
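
IncrementAllDone() only flushes the completed prefix of the queue, so increments are applied strictly in submission order; a hedged sketch, where host1x, class_id and syncpt are assumed placeholders (not from the commit):

// Illustrative only: the second increment is held back until the first is signalled.
Tegra::Host1x::SyncptIncrManager incr_manager{host1x};
const u32 class_id = 0; // hypothetical class id
const u32 syncpt = 7;   // hypothetical syncpoint id
const u32 first = incr_manager.IncrementWhenDone(class_id, syncpt);
const u32 second = incr_manager.IncrementWhenDone(class_id, syncpt);
incr_manager.SignalDone(second); // marked complete, but not yet applied
incr_manager.SignalDone(first);  // both syncpoint increments are now applied, in order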
@@ -1,53 +1,53 @@
// SPDX-FileCopyrightText: Ryujinx Team and Contributors
// SPDX-License-Identifier: MIT

#pragma once

#include <mutex>
#include <vector>
#include "common/common_types.h"

namespace Tegra {

namespace Host1x {

class Host1x;

struct SyncptIncr {
    u32 id;
    u32 class_id;
    u32 syncpt_id;
    bool complete;

    SyncptIncr(u32 id_, u32 class_id_, u32 syncpt_id_, bool done = false)
        : id(id_), class_id(class_id_), syncpt_id(syncpt_id_), complete(done) {}
};

class SyncptIncrManager {
public:
    explicit SyncptIncrManager(Host1x& host1x);
    ~SyncptIncrManager();

    /// Adds an already-completed increment for the given syncpoint, then applies all completed
    /// increments.
    void Increment(u32 id);

    /// Queues an increment to be signalled later and returns its handle.
    u32 IncrementWhenDone(u32 class_id, u32 id);

    /// Marks the increment associated with the handle as complete, then runs IncrementAllDone().
    void SignalDone(u32 handle);

    /// Increment all sequential pending increments that are already done.
    void IncrementAllDone();

private:
    std::vector<SyncptIncr> increments;
    std::mutex increment_lock;
    u32 current_id{};

    Host1x& host1x;
};

} // namespace Host1x

} // namespace Tegra
@@ -1,106 +1,106 @@
// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#include "common/microprofile.h"
#include "video_core/host1x/syncpoint_manager.h"

namespace Tegra {

namespace Host1x {

MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));

SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
    std::atomic<u32>& syncpoint, std::list<RegisteredAction>& action_storage, u32 expected_value,
    std::function<void()>&& action) {
    if (syncpoint.load(std::memory_order_acquire) >= expected_value) {
        action();
        return {};
    }

    std::unique_lock lk(guard);
    if (syncpoint.load(std::memory_order_relaxed) >= expected_value) {
        action();
        return {};
    }
    auto it = action_storage.begin();
    while (it != action_storage.end()) {
        if (it->expected_value >= expected_value) {
            break;
        }
        ++it;
    }
    return action_storage.emplace(it, expected_value, std::move(action));
}

void SyncpointManager::DeregisterAction(std::list<RegisteredAction>& action_storage,
                                        ActionHandle& handle) {
    std::unique_lock lk(guard);

    // Ensure the iterator still exists in the list before erasing it; erasing an invalid
    // iterator would be undefined behavior, and that matters here because deregistration
    // isn't always called from a locked context.
    for (auto it = action_storage.begin(); it != action_storage.end(); it++) {
        if (it == handle) {
            action_storage.erase(it);
            return;
        }
    }
}

void SyncpointManager::DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle) {
    DeregisterAction(guest_action_storage[syncpoint_id], handle);
}

void SyncpointManager::DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle) {
    DeregisterAction(host_action_storage[syncpoint_id], handle);
}

void SyncpointManager::IncrementGuest(u32 syncpoint_id) {
    Increment(syncpoints_guest[syncpoint_id], wait_guest_cv, guest_action_storage[syncpoint_id]);
}

void SyncpointManager::IncrementHost(u32 syncpoint_id) {
    Increment(syncpoints_host[syncpoint_id], wait_host_cv, host_action_storage[syncpoint_id]);
}

void SyncpointManager::WaitGuest(u32 syncpoint_id, u32 expected_value) {
    Wait(syncpoints_guest[syncpoint_id], wait_guest_cv, expected_value);
}

void SyncpointManager::WaitHost(u32 syncpoint_id, u32 expected_value) {
    MICROPROFILE_SCOPE(GPU_wait);
    Wait(syncpoints_host[syncpoint_id], wait_host_cv, expected_value);
}

void SyncpointManager::Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
                                 std::list<RegisteredAction>& action_storage) {
    auto new_value{syncpoint.fetch_add(1, std::memory_order_acq_rel) + 1};

    std::unique_lock lk(guard);
    auto it = action_storage.begin();
    while (it != action_storage.end()) {
        if (it->expected_value > new_value) {
            break;
        }
        it->action();
        it = action_storage.erase(it);
    }
    wait_cv.notify_all();
}

void SyncpointManager::Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
                            u32 expected_value) {
    const auto pred = [&]() { return syncpoint.load(std::memory_order_acquire) >= expected_value; };
    if (pred()) {
        return;
    }

    std::unique_lock lk(guard);
    wait_cv.wait(lk, pred);
}

} // namespace Host1x

} // namespace Tegra
@@ -1,98 +1,98 @@
// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#pragma once

#include <array>
#include <atomic>
#include <condition_variable>
#include <functional>
#include <list>
#include <mutex>

#include "common/common_types.h"

namespace Tegra {

namespace Host1x {

class SyncpointManager {
public:
    u32 GetGuestSyncpointValue(u32 id) const {
        return syncpoints_guest[id].load(std::memory_order_acquire);
    }

    u32 GetHostSyncpointValue(u32 id) const {
        return syncpoints_host[id].load(std::memory_order_acquire);
    }

    struct RegisteredAction {
        explicit RegisteredAction(u32 expected_value_, std::function<void()>&& action_)
            : expected_value{expected_value_}, action{std::move(action_)} {}
        u32 expected_value;
        std::function<void()> action;
    };
    using ActionHandle = std::list<RegisteredAction>::iterator;

    template <typename Func>
    ActionHandle RegisterGuestAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
        std::function<void()> func(action);
        return RegisterAction(syncpoints_guest[syncpoint_id], guest_action_storage[syncpoint_id],
                              expected_value, std::move(func));
    }

    template <typename Func>
    ActionHandle RegisterHostAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
        std::function<void()> func(action);
        return RegisterAction(syncpoints_host[syncpoint_id], host_action_storage[syncpoint_id],
                              expected_value, std::move(func));
    }

    void DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle);

    void DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle);

    void IncrementGuest(u32 syncpoint_id);

    void IncrementHost(u32 syncpoint_id);

    void WaitGuest(u32 syncpoint_id, u32 expected_value);

    void WaitHost(u32 syncpoint_id, u32 expected_value);

    bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) const {
        return syncpoints_guest[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
    }

    bool IsReadyHost(u32 syncpoint_id, u32 expected_value) const {
        return syncpoints_host[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
    }

private:
    void Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
                   std::list<RegisteredAction>& action_storage);

    ActionHandle RegisterAction(std::atomic<u32>& syncpoint,
                                std::list<RegisteredAction>& action_storage, u32 expected_value,
                                std::function<void()>&& action);

    void DeregisterAction(std::list<RegisteredAction>& action_storage, ActionHandle& handle);

    void Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, u32 expected_value);

    static constexpr size_t NUM_MAX_SYNCPOINTS = 192;

    std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_guest{};
    std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_host{};

    std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> guest_action_storage;
    std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> host_action_storage;

    std::mutex guard;
    std::condition_variable wait_guest_cv;
    std::condition_variable wait_host_cv;
};

} // namespace Host1x

} // namespace Tegra
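
A hedged usage sketch of the host-side API above (illustrative, not from the commit): register a callback for a target value, advance the syncpoint, then wait.

// Illustrative only: the action fires inside IncrementHost() once syncpoint 0 reaches 5.
Tegra::Host1x::SyncpointManager manager;
manager.RegisterHostAction(0, 5, [] { /* e.g. signal a fence */ });
for (u32 i = 0; i < 5; ++i) {
    manager.IncrementHost(0);
}
manager.WaitHost(0, 5); // returns immediately, the value is already 5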
@@ -1,244 +1,244 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <array>

extern "C" {
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
#include <libswscale/swscale.h>
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
}

#include "common/assert.h"
#include "common/bit_field.h"
#include "common/logging/log.h"

#include "video_core/engines/maxwell_3d.h"
#include "video_core/host1x/host1x.h"
#include "video_core/host1x/nvdec.h"
#include "video_core/host1x/vic.h"
#include "video_core/memory_manager.h"
#include "video_core/textures/decoders.h"

namespace Tegra {

namespace Host1x {

namespace {
enum class VideoPixelFormat : u64_le {
    RGBA8 = 0x1f,
    BGRA8 = 0x20,
    RGBX8 = 0x23,
    YUV420 = 0x44,
};
} // Anonymous namespace

union VicConfig {
    u64_le raw{};
    BitField<0, 7, VideoPixelFormat> pixel_format;
    BitField<7, 2, u64_le> chroma_loc_horiz;
    BitField<9, 2, u64_le> chroma_loc_vert;
    BitField<11, 4, u64_le> block_linear_kind;
    BitField<15, 4, u64_le> block_linear_height_log2;
    BitField<32, 14, u64_le> surface_width_minus1;
    BitField<46, 14, u64_le> surface_height_minus1;
};

Vic::Vic(Host1x& host1x_, std::shared_ptr<Nvdec> nvdec_processor_)
    : host1x(host1x_),
      nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {}

Vic::~Vic() = default;

void Vic::ProcessMethod(Method method, u32 argument) {
    LOG_DEBUG(HW_GPU, "Vic method 0x{:X}", static_cast<u32>(method));
    const u64 arg = static_cast<u64>(argument) << 8;
    switch (method) {
    case Method::Execute:
        Execute();
        break;
    case Method::SetConfigStructOffset:
        config_struct_address = arg;
        break;
    case Method::SetOutputSurfaceLumaOffset:
        output_surface_luma_address = arg;
        break;
    case Method::SetOutputSurfaceChromaOffset:
        output_surface_chroma_address = arg;
        break;
    default:
        break;
    }
}

void Vic::Execute() {
    if (output_surface_luma_address == 0) {
        LOG_ERROR(Service_NVDRV, "VIC Luma address not set.");
        return;
    }
    const VicConfig config{host1x.MemoryManager().Read<u64>(config_struct_address + 0x20)};
    const AVFramePtr frame_ptr = nvdec_processor->GetFrame();
    const auto* frame = frame_ptr.get();
    if (!frame) {
        return;
    }
    const u64 surface_width = config.surface_width_minus1 + 1;
    const u64 surface_height = config.surface_height_minus1 + 1;
    if (static_cast<u64>(frame->width) != surface_width ||
        static_cast<u64>(frame->height) != surface_height) {
        // TODO: Properly support multiple video streams with differing frame dimensions
        LOG_WARNING(Service_NVDRV, "Frame dimensions {}x{} don't match surface dimensions {}x{}",
                    frame->width, frame->height, surface_width, surface_height);
    }
    switch (config.pixel_format) {
    case VideoPixelFormat::RGBA8:
    case VideoPixelFormat::BGRA8:
    case VideoPixelFormat::RGBX8:
        WriteRGBFrame(frame, config);
        break;
    case VideoPixelFormat::YUV420:
        WriteYUVFrame(frame, config);
        break;
    default:
        UNIMPLEMENTED_MSG("Unknown video pixel format {:X}", config.pixel_format.Value());
        break;
    }
}

void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
    LOG_TRACE(Service_NVDRV, "Writing RGB Frame");

    if (!scaler_ctx || frame->width != scaler_width || frame->height != scaler_height) {
        const AVPixelFormat target_format = [pixel_format = config.pixel_format]() {
            switch (pixel_format) {
            case VideoPixelFormat::RGBA8:
                return AV_PIX_FMT_RGBA;
            case VideoPixelFormat::BGRA8:
                return AV_PIX_FMT_BGRA;
            case VideoPixelFormat::RGBX8:
                return AV_PIX_FMT_RGB0;
            default:
                return AV_PIX_FMT_RGBA;
            }
        }();

        sws_freeContext(scaler_ctx);
        // Frames are decoded into either YUV420 or NV12 formats. Convert to the desired RGB format.
        scaler_ctx = sws_getContext(frame->width, frame->height,
                                    static_cast<AVPixelFormat>(frame->format), frame->width,
                                    frame->height, target_format, 0, nullptr, nullptr, nullptr);
        scaler_width = frame->width;
        scaler_height = frame->height;
        converted_frame_buffer.reset();
    }
    if (!converted_frame_buffer) {
        const size_t frame_size = frame->width * frame->height * 4;
        converted_frame_buffer = AVMallocPtr{static_cast<u8*>(av_malloc(frame_size)), av_free};
    }
    const std::array<int, 4> converted_stride{frame->width * 4, frame->height * 4, 0, 0};
    u8* const converted_frame_buf_addr{converted_frame_buffer.get()};
    sws_scale(scaler_ctx, frame->data, frame->linesize, 0, frame->height, &converted_frame_buf_addr,
              converted_stride.data());

    // Use the minimum of surface/frame dimensions to avoid buffer overflow.
    const u32 surface_width = static_cast<u32>(config.surface_width_minus1) + 1;
    const u32 surface_height = static_cast<u32>(config.surface_height_minus1) + 1;
    const u32 width = std::min(surface_width, static_cast<u32>(frame->width));
    const u32 height = std::min(surface_height, static_cast<u32>(frame->height));
    const u32 blk_kind = static_cast<u32>(config.block_linear_kind);
    if (blk_kind != 0) {
        // Swizzle the pitch-linear frame to block linear
        const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
        const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
        luma_buffer.resize(size);
        std::span<const u8> frame_buff(converted_frame_buf_addr, 4 * width * height);
        Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height,
                                block_height, 0, width * 4);

        host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
    } else {
        // Send the pitch-linear frame as-is
        const size_t linear_size = width * height * 4;
        host1x.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
                                          linear_size);
    }
}

void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
    LOG_TRACE(Service_NVDRV, "Writing YUV420 Frame");

    const std::size_t surface_width = config.surface_width_minus1 + 1;
    const std::size_t surface_height = config.surface_height_minus1 + 1;
    const std::size_t aligned_width = (surface_width + 0xff) & ~0xffUL;
    // Use the minimum of surface/frame dimensions to avoid buffer overflow.
    const auto frame_width = std::min(surface_width, static_cast<size_t>(frame->width));
    const auto frame_height = std::min(surface_height, static_cast<size_t>(frame->height));

    const auto stride = static_cast<size_t>(frame->linesize[0]);

    luma_buffer.resize(aligned_width * surface_height);
    chroma_buffer.resize(aligned_width * surface_height / 2);

    // Populate luma buffer
    const u8* luma_src = frame->data[0];
    for (std::size_t y = 0; y < frame_height; ++y) {
        const std::size_t src = y * stride;
        const std::size_t dst = y * aligned_width;
        for (std::size_t x = 0; x < frame_width; ++x) {
            luma_buffer[dst + x] = luma_src[src + x];
        }
    }
    host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
                                      luma_buffer.size());

    // Chroma
    const std::size_t half_height = frame_height / 2;
    const auto half_stride = static_cast<size_t>(frame->linesize[1]);

    switch (frame->format) {
    case AV_PIX_FMT_YUV420P: {
        // Frame from FFmpeg software decoding:
        // populate the chroma buffer by interleaving both planes.
        const std::size_t half_width = frame_width / 2;
        const u8* chroma_b_src = frame->data[1];
        const u8* chroma_r_src = frame->data[2];
        for (std::size_t y = 0; y < half_height; ++y) {
            const std::size_t src = y * half_stride;
            const std::size_t dst = y * aligned_width;

            for (std::size_t x = 0; x < half_width; ++x) {
                chroma_buffer[dst + x * 2] = chroma_b_src[src + x];
                chroma_buffer[dst + x * 2 + 1] = chroma_r_src[src + x];
            }
        }
        break;
    }
    case AV_PIX_FMT_NV12: {
        // Frame from VA-API hardware decoding:
        // the chroma plane is already interleaved, so copy it directly.
        const u8* chroma_src = frame->data[1];
        for (std::size_t y = 0; y < half_height; ++y) {
            const std::size_t src = y * stride;
            const std::size_t dst = y * aligned_width;
            for (std::size_t x = 0; x < frame_width; ++x) {
                chroma_buffer[dst + x] = chroma_src[src + x];
            }
        }
        break;
    }
    default:
        ASSERT(false);
        break;
    }
    host1x.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
                                      chroma_buffer.size());
}

} // namespace Host1x

} // namespace Tegra
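
Execute() reads the config word at config_struct_address + 0x20 and unpacks the surface description from it; a decoding sketch with hypothetical values (VicConfig is local to this translation unit):

// Illustrative only: a 1280x720 YUV420 surface packed the way Execute() expects.
VicConfig config{};
config.raw = u64{0x44}           // pixel_format = YUV420
             | (u64{1279} << 32) // surface_width_minus1
             | (u64{719} << 46); // surface_height_minus1
const u64 width = config.surface_width_minus1 + 1;   // 1280
const u64 height = config.surface_height_minus1 + 1; // 720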
|
||||
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <array>
|
||||
|
||||
extern "C" {
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wconversion"
|
||||
#endif
|
||||
#include <libswscale/swscale.h>
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
#pragma GCC diagnostic pop
|
||||
#endif
|
||||
}
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/bit_field.h"
|
||||
#include "common/logging/log.h"
|
||||
|
||||
#include "video_core/engines/maxwell_3d.h"
|
||||
#include "video_core/host1x/host1x.h"
|
||||
#include "video_core/host1x/nvdec.h"
|
||||
#include "video_core/host1x/vic.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
#include "video_core/textures/decoders.h"
|
||||
|
||||
namespace Tegra {
|
||||
|
||||
namespace Host1x {
|
||||
|
||||
namespace {
|
||||
enum class VideoPixelFormat : u64_le {
|
||||
RGBA8 = 0x1f,
|
||||
BGRA8 = 0x20,
|
||||
RGBX8 = 0x23,
|
||||
YUV420 = 0x44,
|
||||
};
|
||||
} // Anonymous namespace
|
||||
|
||||
union VicConfig {
|
||||
u64_le raw{};
|
||||
BitField<0, 7, VideoPixelFormat> pixel_format;
|
||||
BitField<7, 2, u64_le> chroma_loc_horiz;
|
||||
BitField<9, 2, u64_le> chroma_loc_vert;
|
||||
BitField<11, 4, u64_le> block_linear_kind;
|
||||
BitField<15, 4, u64_le> block_linear_height_log2;
|
||||
BitField<32, 14, u64_le> surface_width_minus1;
|
||||
BitField<46, 14, u64_le> surface_height_minus1;
|
||||
};
|
||||
|
||||
Vic::Vic(Host1x& host1x_, std::shared_ptr<Nvdec> nvdec_processor_)
|
||||
: host1x(host1x_),
|
||||
nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {}
|
||||
|
||||
Vic::~Vic() = default;
|
||||
|
||||
void Vic::ProcessMethod(Method method, u32 argument) {
|
||||
LOG_DEBUG(HW_GPU, "Vic method 0x{:X}", static_cast<u32>(method));
|
||||
const u64 arg = static_cast<u64>(argument) << 8;
|
||||
switch (method) {
|
||||
case Method::Execute:
|
||||
Execute();
|
||||
break;
|
||||
case Method::SetConfigStructOffset:
|
||||
config_struct_address = arg;
|
||||
break;
|
||||
case Method::SetOutputSurfaceLumaOffset:
|
||||
output_surface_luma_address = arg;
|
||||
break;
|
||||
case Method::SetOutputSurfaceChromaOffset:
|
||||
output_surface_chroma_address = arg;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void Vic::Execute() {
|
||||
if (output_surface_luma_address == 0) {
|
||||
LOG_ERROR(Service_NVDRV, "VIC Luma address not set.");
|
||||
return;
|
||||
}
|
||||
const VicConfig config{host1x.MemoryManager().Read<u64>(config_struct_address + 0x20)};
|
||||
const AVFramePtr frame_ptr = nvdec_processor->GetFrame();
|
||||
const auto* frame = frame_ptr.get();
|
||||
if (!frame) {
|
||||
return;
|
||||
}
|
||||
const u64 surface_width = config.surface_width_minus1 + 1;
|
||||
const u64 surface_height = config.surface_height_minus1 + 1;
|
||||
if (static_cast<u64>(frame->width) != surface_width ||
|
||||
static_cast<u64>(frame->height) != surface_height) {
|
||||
// TODO: Properly support multiple video streams with differing frame dimensions
|
||||
LOG_WARNING(Service_NVDRV, "Frame dimensions {}x{} don't match surface dimensions {}x{}",
|
||||
frame->width, frame->height, surface_width, surface_height);
|
||||
}
|
||||
switch (config.pixel_format) {
|
||||
case VideoPixelFormat::RGBA8:
|
||||
case VideoPixelFormat::BGRA8:
|
||||
case VideoPixelFormat::RGBX8:
|
||||
WriteRGBFrame(frame, config);
|
||||
break;
|
||||
case VideoPixelFormat::YUV420:
|
||||
WriteYUVFrame(frame, config);
|
||||
break;
|
||||
default:
|
||||
UNIMPLEMENTED_MSG("Unknown video pixel format {:X}", config.pixel_format.Value());
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
|
||||
LOG_TRACE(Service_NVDRV, "Writing RGB Frame");
|
||||
|
||||
if (!scaler_ctx || frame->width != scaler_width || frame->height != scaler_height) {
|
||||
const AVPixelFormat target_format = [pixel_format = config.pixel_format]() {
|
||||
switch (pixel_format) {
|
||||
case VideoPixelFormat::RGBA8:
|
||||
return AV_PIX_FMT_RGBA;
|
||||
case VideoPixelFormat::BGRA8:
|
||||
return AV_PIX_FMT_BGRA;
|
||||
case VideoPixelFormat::RGBX8:
|
||||
return AV_PIX_FMT_RGB0;
|
||||
default:
|
||||
return AV_PIX_FMT_RGBA;
|
||||
}
|
||||
}();
|
||||
|
||||
sws_freeContext(scaler_ctx);
|
||||
// Frames are decoded into either YUV420 or NV12 formats. Convert to desired RGB format
|
||||
scaler_ctx = sws_getContext(frame->width, frame->height,
|
||||
static_cast<AVPixelFormat>(frame->format), frame->width,
|
||||
frame->height, target_format, 0, nullptr, nullptr, nullptr);
|
||||
scaler_width = frame->width;
|
||||
scaler_height = frame->height;
|
||||
converted_frame_buffer.reset();
|
||||
}
|
||||
if (!converted_frame_buffer) {
|
||||
const size_t frame_size = frame->width * frame->height * 4;
|
||||
converted_frame_buffer = AVMallocPtr{static_cast<u8*>(av_malloc(frame_size)), av_free};
|
||||
}
|
||||
const std::array<int, 4> converted_stride{frame->width * 4, frame->height * 4, 0, 0};
|
||||
u8* const converted_frame_buf_addr{converted_frame_buffer.get()};
|
||||
sws_scale(scaler_ctx, frame->data, frame->linesize, 0, frame->height, &converted_frame_buf_addr,
|
||||
converted_stride.data());
|
||||
|
||||
// Use the minimum of surface/frame dimensions to avoid buffer overflow.
|
||||
const u32 surface_width = static_cast<u32>(config.surface_width_minus1) + 1;
|
||||
const u32 surface_height = static_cast<u32>(config.surface_height_minus1) + 1;
|
||||
const u32 width = std::min(surface_width, static_cast<u32>(frame->width));
|
||||
const u32 height = std::min(surface_height, static_cast<u32>(frame->height));
|
||||
const u32 blk_kind = static_cast<u32>(config.block_linear_kind);
|
||||
if (blk_kind != 0) {
|
||||
// swizzle pitch linear to block linear
|
||||
const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
|
||||
const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
|
||||
luma_buffer.resize(size);
|
||||
std::span<const u8> frame_buff(converted_frame_buf_addr, 4 * width * height);
|
||||
Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height,
|
||||
block_height, 0, width * 4);
|
||||
|
||||
host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
|
||||
} else {
|
||||
// send pitch linear frame
|
||||
const size_t linear_size = width * height * 4;
|
||||
host1x.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
|
||||
linear_size);
|
||||
}
|
||||
}
|
||||
|
||||
void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
|
||||
LOG_TRACE(Service_NVDRV, "Writing YUV420 Frame");
|
||||
|
||||
const std::size_t surface_width = config.surface_width_minus1 + 1;
|
||||
const std::size_t surface_height = config.surface_height_minus1 + 1;
|
||||
const std::size_t aligned_width = (surface_width + 0xff) & ~0xffUL;
|
||||
// Use the minimum of surface/frame dimensions to avoid buffer overflow.
|
||||
const auto frame_width = std::min(surface_width, static_cast<size_t>(frame->width));
|
||||
const auto frame_height = std::min(surface_height, static_cast<size_t>(frame->height));
|
||||
|
||||
const auto stride = static_cast<size_t>(frame->linesize[0]);
|
||||
|
||||
luma_buffer.resize(aligned_width * surface_height);
|
||||
chroma_buffer.resize(aligned_width * surface_height / 2);
|
||||
|
||||
    // Populate luma buffer
    const u8* luma_src = frame->data[0];
    for (std::size_t y = 0; y < frame_height; ++y) {
        const std::size_t src = y * stride;
        const std::size_t dst = y * aligned_width;
        for (std::size_t x = 0; x < frame_width; ++x) {
            luma_buffer[dst + x] = luma_src[src + x];
        }
    }
    host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
                                      luma_buffer.size());

    // Chroma
    const std::size_t half_height = frame_height / 2;
    const auto half_stride = static_cast<size_t>(frame->linesize[1]);

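    // Software-decoded frames (YUV420P) carry separate U and V planes that need interleaving;
    // hardware-decoded frames (NV12, e.g. from VA-API) already have interleaved chroma.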
    switch (frame->format) {
    case AV_PIX_FMT_YUV420P: {
        // Frame from FFmpeg software
        // Populate chroma buffer from both channels with interleaving.
        const std::size_t half_width = frame_width / 2;
        const u8* chroma_b_src = frame->data[1];
        const u8* chroma_r_src = frame->data[2];
        for (std::size_t y = 0; y < half_height; ++y) {
            const std::size_t src = y * half_stride;
            const std::size_t dst = y * aligned_width;

            for (std::size_t x = 0; x < half_width; ++x) {
                chroma_buffer[dst + x * 2] = chroma_b_src[src + x];
                chroma_buffer[dst + x * 2 + 1] = chroma_r_src[src + x];
            }
        }
        break;
    }
    case AV_PIX_FMT_NV12: {
        // Frame from VA-API hardware
        // This is already interleaved so just copy
        const u8* chroma_src = frame->data[1];
        for (std::size_t y = 0; y < half_height; ++y) {
            const std::size_t src = y * stride;
            const std::size_t dst = y * aligned_width;
            for (std::size_t x = 0; x < frame_width; ++x) {
                chroma_buffer[dst + x] = chroma_src[src + x];
            }
        }
        break;
    }
    default:
        ASSERT(false);
        break;
    }
    host1x.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
                                      chroma_buffer.size());
}

} // namespace Host1x

} // namespace Tegra

@@ -1,66 +1,66 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <memory>
#include <vector>
#include "common/common_types.h"

struct SwsContext;

namespace Tegra {

namespace Host1x {

class Host1x;
class Nvdec;
union VicConfig;

class Vic {
public:
    enum class Method : u32 {
        Execute = 0xc0,
        SetControlParams = 0x1c1,
        SetConfigStructOffset = 0x1c2,
        SetOutputSurfaceLumaOffset = 0x1c8,
        SetOutputSurfaceChromaOffset = 0x1c9,
        SetOutputSurfaceChromaUnusedOffset = 0x1ca
    };

    explicit Vic(Host1x& host1x, std::shared_ptr<Nvdec> nvdec_processor);

    ~Vic();

    /// Write to the device state.
    void ProcessMethod(Method method, u32 argument);

private:
    void Execute();

    void WriteRGBFrame(const AVFrame* frame, const VicConfig& config);

    void WriteYUVFrame(const AVFrame* frame, const VicConfig& config);

    Host1x& host1x;
    std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor;

    /// Avoid reallocation of the following buffers every frame, as their
    /// size does not change during a stream
    using AVMallocPtr = std::unique_ptr<u8, decltype(&av_free)>;
    AVMallocPtr converted_frame_buffer;
    std::vector<u8> luma_buffer;
    std::vector<u8> chroma_buffer;

    GPUVAddr config_struct_address{};
    GPUVAddr output_surface_luma_address{};
    GPUVAddr output_surface_chroma_address{};

    SwsContext* scaler_ctx{};
    s32 scaler_width{};
    s32 scaler_height{};
};

} // namespace Host1x

} // namespace Tegra