early-access version 3242

parent 7c2fb8c0da
commit 79ff2722d6
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============

-This is the source code for early-access 3241.
+This is the source code for early-access 3242.

 ## Legal Notice

@@ -78,6 +78,7 @@ add_library(common STATIC
     logging/types.h
     lz4_compression.cpp
     lz4_compression.h
+    make_unique_for_overwrite.h
     math_util.h
     memory_detect.cpp
     memory_detect.h
@@ -101,6 +102,7 @@ add_library(common STATIC
     ${CMAKE_CURRENT_BINARY_DIR}/scm_rev.cpp
     scm_rev.h
     scope_exit.h
+    scratch_buffer.h
     settings.cpp
     settings.h
     settings_input.cpp
src/common/make_unique_for_overwrite.h — new executable file, 25 lines
@@ -0,0 +1,25 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <memory>
+#include <type_traits>
+
+namespace Common {
+
+template <class T>
+requires(!std::is_array_v<T>) std::unique_ptr<T> make_unique_for_overwrite() {
+    return std::unique_ptr<T>(new T);
+}
+
+template <class T>
+requires std::is_unbounded_array_v<T> std::unique_ptr<T> make_unique_for_overwrite(std::size_t n) {
+    return std::unique_ptr<T>(new std::remove_extent_t<T>[n]);
+}
+
+template <class T, class... Args>
+requires std::is_bounded_array_v<T>
+void make_unique_for_overwrite(Args&&...) = delete;
+
+} // namespace Common
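The helper above mirrors C++20's std::make_unique_for_overwrite: it allocates with "new T" / "new T[n]" (default-initialization) rather than "new T()" / "new T[n]()" (value-initialization), so arrays of trivial element types are left uninitialized instead of being zero-filled. A minimal sketch of the difference (the buffer size is illustrative, not from this commit):

    #include <memory>
    #include "common/make_unique_for_overwrite.h"

    void Example() {
        // Value-initialized: all 1024 bytes are zeroed during allocation.
        auto zeroed = std::make_unique<unsigned char[]>(1024);

        // Default-initialized: contents are indeterminate until written, which
        // skips the redundant zero-fill when the buffer is about to be
        // overwritten entirely (e.g. by memcpy or a ReadBlock call).
        auto scratch = Common::make_unique_for_overwrite<unsigned char[]>(1024);
    }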
src/common/scratch_buffer.h — new executable file, 95 lines
@@ -0,0 +1,95 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/make_unique_for_overwrite.h"
+
+namespace Common {
+
+/**
+ * ScratchBuffer class
+ * This class creates a default initialized heap allocated buffer for cases such as intermediate
+ * buffers being copied into entirely, where value initializing members during allocation or resize
+ * is redundant.
+ */
+template <typename T>
+class ScratchBuffer {
+public:
+    ScratchBuffer() = default;
+
+    explicit ScratchBuffer(size_t initial_capacity)
+        : last_requested_size{initial_capacity}, buffer_capacity{initial_capacity},
+          buffer{Common::make_unique_for_overwrite<T[]>(initial_capacity)} {}
+
+    ~ScratchBuffer() = default;
+
+    /// This will only grow the buffer's capacity if size is greater than the current capacity.
+    /// The previously held data will remain intact.
+    void resize(size_t size) {
+        if (size > buffer_capacity) {
+            auto new_buffer = Common::make_unique_for_overwrite<T[]>(size);
+            std::move(buffer.get(), buffer.get() + buffer_capacity, new_buffer.get());
+            buffer = std::move(new_buffer);
+            buffer_capacity = size;
+        }
+        last_requested_size = size;
+    }
+
+    /// This will only grow the buffer's capacity if size is greater than the current capacity.
+    /// The previously held data will be destroyed if a reallocation occurs.
+    void resize_destructive(size_t size) {
+        if (size > buffer_capacity) {
+            buffer_capacity = size;
+            buffer = Common::make_unique_for_overwrite<T[]>(buffer_capacity);
+        }
+        last_requested_size = size;
+    }
+
+    [[nodiscard]] T* data() noexcept {
+        return buffer.get();
+    }
+
+    [[nodiscard]] const T* data() const noexcept {
+        return buffer.get();
+    }
+
+    [[nodiscard]] T* begin() noexcept {
+        return data();
+    }
+
+    [[nodiscard]] const T* begin() const noexcept {
+        return data();
+    }
+
+    [[nodiscard]] T* end() noexcept {
+        return data() + last_requested_size;
+    }
+
+    [[nodiscard]] const T* end() const noexcept {
+        return data() + last_requested_size;
+    }
+
+    [[nodiscard]] T& operator[](size_t i) {
+        return buffer[i];
+    }
+
+    [[nodiscard]] const T& operator[](size_t i) const {
+        return buffer[i];
+    }
+
+    [[nodiscard]] size_t size() const noexcept {
+        return last_requested_size;
+    }
+
+    [[nodiscard]] size_t capacity() const noexcept {
+        return buffer_capacity;
+    }
+
+private:
+    size_t last_requested_size{};
+    size_t buffer_capacity{};
+    std::unique_ptr<T[]> buffer{};
+};
+
+} // namespace Common
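A short usage sketch of the two resize flavors (names and sizes are illustrative, not part of the commit):

    #include "common/scratch_buffer.h"

    void Example() {
        Common::ScratchBuffer<unsigned char> buf;

        buf.resize(100);             // grows capacity; any existing data is moved over
        // ... fill the buffer through buf.data() ...

        buf.resize(50);              // no reallocation: size() now reports 50,
                                     // capacity() still reports 100

        buf.resize_destructive(200); // reallocates without copying: old contents
                                     // are discarded and the new contents are
                                     // indeterminate until written
    }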
@@ -8,6 +8,7 @@ add_executable(tests
    common/host_memory.cpp
    common/param_package.cpp
    common/ring_buffer.cpp
+   common/scratch_buffer.cpp
    common/unique_function.cpp
    core/core_timing.cpp
    core/internal_network/network.cpp
src/tests/common/scratch_buffer.cpp — new executable file, 199 lines
@@ -0,0 +1,199 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <algorithm>
+#include <array>
+#include <span>
+#include <catch2/catch.hpp>
+#include "common/common_types.h"
+#include "common/scratch_buffer.h"
+
+namespace Common {
+
+TEST_CASE("ScratchBuffer: Basic Test", "[common]") {
+    ScratchBuffer<u8> buf;
+
+    REQUIRE(buf.size() == 0U);
+    REQUIRE(buf.capacity() == 0U);
+
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    buf.resize(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size());
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: resize_destructive Grow", "[common]") {
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    ScratchBuffer<u8> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    // Increasing the size should reallocate the buffer
+    buf.resize_destructive(payload.size() * 2);
+    REQUIRE(buf.size() == payload.size() * 2);
+    REQUIRE(buf.capacity() == payload.size() * 2);
+
+    // Since the buffer is not value initialized, reading its data will be garbage
+}
+
+TEST_CASE("ScratchBuffer: resize_destructive Shrink", "[common]") {
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    ScratchBuffer<u8> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size());
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    // Decreasing the size should not cause a buffer reallocation.
+    // This can be tested by ensuring the buffer capacity and data have not changed.
+    buf.resize_destructive(1U);
+    REQUIRE(buf.size() == 1U);
+    REQUIRE(buf.capacity() == payload.size());
+
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: resize Grow u8", "[common]") {
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    ScratchBuffer<u8> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size());
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    // Increasing the size should reallocate the buffer
+    buf.resize(payload.size() * 2);
+    REQUIRE(buf.size() == payload.size() * 2);
+    REQUIRE(buf.capacity() == payload.size() * 2);
+
+    // resize() keeps the previous data intact
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: resize Grow u64", "[common]") {
+    std::array<u64, 10> payload;
+    payload.fill(6666);
+
+    ScratchBuffer<u64> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size() * sizeof(u64));
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    // Increasing the size should reallocate the buffer
+    buf.resize(payload.size() * 2);
+    REQUIRE(buf.size() == payload.size() * 2);
+    REQUIRE(buf.capacity() == payload.size() * 2);
+
+    // resize() keeps the previous data intact
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: resize Shrink", "[common]") {
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    ScratchBuffer<u8> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size());
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    // Decreasing the size should not cause a buffer reallocation.
+    // This can be tested by ensuring the buffer capacity and data have not changed.
+    buf.resize(1U);
+    REQUIRE(buf.size() == 1U);
+    REQUIRE(buf.capacity() == payload.size());
+
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: Span Size", "[common]") {
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    ScratchBuffer<u8> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size());
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    buf.resize(3U);
+    REQUIRE(buf.size() == 3U);
+    REQUIRE(buf.capacity() == payload.size());
+
+    const auto buf_span = std::span<u8>(buf);
+    // The span size is the last requested size of the buffer, not its capacity
+    REQUIRE(buf_span.size() == buf.size());
+
+    for (size_t i = 0; i < buf_span.size(); ++i) {
+        REQUIRE(buf_span[i] == buf[i]);
+        REQUIRE(buf_span[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: Span Writes", "[common]") {
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    ScratchBuffer<u8> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size());
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    buf.resize(3U);
+    REQUIRE(buf.size() == 3U);
+    REQUIRE(buf.capacity() == payload.size());
+
+    const auto buf_span = std::span<u8>(buf);
+    REQUIRE(buf_span.size() == buf.size());
+
+    for (size_t i = 0; i < buf_span.size(); ++i) {
+        const auto new_value = static_cast<u8>(i + 1U);
+        // Writes to a span of the scratch buffer will propagate to the buffer itself
+        buf_span[i] = new_value;
+        REQUIRE(buf[i] == new_value);
+    }
+}
+
+} // namespace Common
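The two span test cases work because ScratchBuffer models a contiguous range: begin() and end() return plain pointers bounded by the last requested size, so C++20's std::span range constructor views only the live portion of the buffer rather than its full capacity. For example (illustrative values):

    Common::ScratchBuffer<u8> buf(10); // capacity 10
    buf.resize(3);                     // size() == 3, capacity() == 10
    const std::span<u8> view(buf);     // view.size() == 3, not 10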
@@ -20,6 +20,7 @@
 #include "common/lru_cache.h"
 #include "common/microprofile.h"
 #include "common/polyfill_ranges.h"
+#include "common/scratch_buffer.h"
 #include "common/settings.h"
 #include "core/memory.h"
 #include "video_core/buffer_cache/buffer_base.h"
@@ -422,8 +423,7 @@ private:
     IntervalSet common_ranges;
     std::deque<IntervalSet> committed_ranges;

-    size_t immediate_buffer_capacity = 0;
-    std::unique_ptr<u8[]> immediate_buffer_alloc;
+    Common::ScratchBuffer<u8> immediate_buffer_alloc;

     struct LRUItemParams {
         using ObjectType = BufferId;
@@ -1927,11 +1927,8 @@ std::span<const u8> BufferCache<P>::ImmediateBufferWithData(VAddr cpu_addr, size

 template <class P>
 std::span<u8> BufferCache<P>::ImmediateBuffer(size_t wanted_capacity) {
-    if (wanted_capacity > immediate_buffer_capacity) {
-        immediate_buffer_capacity = wanted_capacity;
-        immediate_buffer_alloc = std::make_unique<u8[]>(wanted_capacity);
-    }
-    return std::span<u8>(immediate_buffer_alloc.get(), wanted_capacity);
+    immediate_buffer_alloc.resize_destructive(wanted_capacity);
+    return std::span<u8>(immediate_buffer_alloc.data(), wanted_capacity);
 }

 template <class P>
@@ -56,7 +56,7 @@ bool DmaPusher::Step() {

     if (command_list.prefetch_command_list.size()) {
         // Prefetched command list from nvdrv, used for things like synchronization
-        command_headers = std::move(command_list.prefetch_command_list);
+        ProcessCommands(command_list.prefetch_command_list);
         dma_pushbuffer.pop();
     } else {
         const CommandListHeader command_list_header{
@@ -74,7 +74,7 @@ bool DmaPusher::Step() {
         }

         // Push buffer non-empty, read a word
-        command_headers.resize(command_list_header.size);
+        command_headers.resize_destructive(command_list_header.size);
         if (Settings::IsGPULevelHigh()) {
             memory_manager.ReadBlock(dma_get, command_headers.data(),
                                      command_list_header.size * sizeof(u32));
@@ -82,16 +82,21 @@ bool DmaPusher::Step() {
             memory_manager.ReadBlockUnsafe(dma_get, command_headers.data(),
                                            command_list_header.size * sizeof(u32));
         }
-    for (std::size_t index = 0; index < command_headers.size();) {
-        const CommandHeader& command_header = command_headers[index];
+        ProcessCommands(command_headers);
+    }
+
+    return true;
+}
+
+void DmaPusher::ProcessCommands(std::span<const CommandHeader> commands) {
+    for (std::size_t index = 0; index < commands.size();) {
+        const CommandHeader& command_header = commands[index];

         if (dma_state.method_count) {
             // Data word of methods command
             if (dma_state.non_incrementing) {
                 const u32 max_write = static_cast<u32>(
-                    std::min<std::size_t>(index + dma_state.method_count, command_headers.size()) -
-                    index);
+                    std::min<std::size_t>(index + dma_state.method_count, commands.size()) - index);
                 CallMultiMethod(&command_header.argument, max_write);
                 dma_state.method_count -= max_write;
                 dma_state.is_last_call = true;
@@ -142,8 +147,6 @@ bool DmaPusher::Step() {
         }
         index++;
     }
-
-    return true;
 }

 void DmaPusher::SetState(const CommandHeader& command_header) {
@@ -4,11 +4,13 @@
 #pragma once

 #include <array>
+#include <span>
 #include <vector>
 #include <queue>

 #include "common/bit_field.h"
 #include "common/common_types.h"
+#include "common/scratch_buffer.h"
 #include "video_core/engines/engine_interface.h"
 #include "video_core/engines/puller.h"
@@ -136,13 +138,15 @@ private:
     static constexpr u32 non_puller_methods = 0x40;
     static constexpr u32 max_subchannels = 8;
     bool Step();
+    void ProcessCommands(std::span<const CommandHeader> commands);

     void SetState(const CommandHeader& command_header);

     void CallMethod(u32 argument) const;
     void CallMultiMethod(const u32* base_start, u32 num_methods) const;

-    std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once
+    Common::ScratchBuffer<CommandHeader>
+        command_headers; ///< Buffer for list of commands fetched at once

     std::queue<CommandList> dma_pushbuffer;  ///< Queue of command lists to be processed
     std::size_t dma_pushbuffer_subindex{};   ///< Index within a command list within the pushbuffer
@@ -159,7 +163,7 @@ private:
     DmaState dma_state{};
     bool dma_increment_once{};

-    bool ib_enable{true}; ///< IB mode enabled
+    const bool ib_enable{true}; ///< IB mode enabled

     std::array<Engines::EngineInterface*, max_subchannels> subchannels{};
@@ -24,7 +24,7 @@ void State::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
 void State::ProcessExec(const bool is_linear_) {
     write_offset = 0;
     copy_size = regs.line_length_in * regs.line_count;
-    inner_buffer.resize(copy_size);
+    inner_buffer.resize_destructive(copy_size);
     is_linear = is_linear_;
 }
@@ -70,7 +70,7 @@ void State::ProcessData(std::span<const u8> read_buffer) {
         const std::size_t dst_size = Tegra::Texture::CalculateSize(
             true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth,
             regs.dest.BlockHeight(), regs.dest.BlockDepth());
-        tmp_buffer.resize(dst_size);
+        tmp_buffer.resize_destructive(dst_size);
         memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size);
         Tegra::Texture::SwizzleSubrect(tmp_buffer, read_buffer, bytes_per_pixel, width,
                                        regs.dest.height, regs.dest.depth, x_offset, regs.dest.y,
@@ -4,9 +4,10 @@
 #pragma once

 #include <span>
 #include <vector>

 #include "common/bit_field.h"
 #include "common/common_types.h"
+#include "common/scratch_buffer.h"

 namespace Tegra {
 class MemoryManager;
@@ -73,8 +74,8 @@ private:

     u32 write_offset = 0;
     u32 copy_size = 0;
-    std::vector<u8> inner_buffer;
-    std::vector<u8> tmp_buffer;
+    Common::ScratchBuffer<u8> inner_buffer;
+    Common::ScratchBuffer<u8> tmp_buffer;
     bool is_linear = false;
     Registers& regs;
     MemoryManager& memory_manager;
@@ -184,12 +184,8 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
     const size_t src_size =
         CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);

-    if (read_buffer.size() < src_size) {
-        read_buffer.resize(src_size);
-    }
-    if (write_buffer.size() < dst_size) {
-        write_buffer.resize(dst_size);
-    }
+    read_buffer.resize_destructive(src_size);
+    write_buffer.resize_destructive(dst_size);

     memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
     memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
@@ -235,12 +231,8 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
         CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
     const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count;

-    if (read_buffer.size() < src_size) {
-        read_buffer.resize(src_size);
-    }
-    if (write_buffer.size() < dst_size) {
-        write_buffer.resize(dst_size);
-    }
+    read_buffer.resize_destructive(src_size);
+    write_buffer.resize_destructive(dst_size);

     memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
     if (Settings::IsGPULevelExtreme()) {
@@ -269,12 +261,8 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() {
     pos_x = pos_x % x_in_gob;
     pos_y = pos_y % 8;

-    if (read_buffer.size() < src_size) {
-        read_buffer.resize(src_size);
-    }
-    if (write_buffer.size() < dst_size) {
-        write_buffer.resize(dst_size);
-    }
+    read_buffer.resize_destructive(src_size);
+    write_buffer.resize_destructive(dst_size);

     if (Settings::IsGPULevelExtreme()) {
         memory_manager.ReadBlock(regs.offset_in + offset, read_buffer.data(), src_size);
@@ -333,14 +321,10 @@ void MaxwellDMA::CopyBlockLinearToBlockLinear() {
     const u32 pitch = x_elements * bytes_per_pixel;
     const size_t mid_buffer_size = pitch * regs.line_count;

-    if (read_buffer.size() < src_size) {
-        read_buffer.resize(src_size);
-    }
-    if (write_buffer.size() < dst_size) {
-        write_buffer.resize(dst_size);
-    }
+    read_buffer.resize_destructive(src_size);
+    write_buffer.resize_destructive(dst_size);

-    intermediate_buffer.resize(mid_buffer_size);
+    intermediate_buffer.resize_destructive(mid_buffer_size);

     memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
     memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
@@ -6,8 +6,10 @@
 #include <array>
 #include <cstddef>
 #include <vector>

 #include "common/bit_field.h"
 #include "common/common_types.h"
+#include "common/scratch_buffer.h"
 #include "video_core/engines/engine_interface.h"

 namespace Core {
@@ -234,9 +236,9 @@ private:
     MemoryManager& memory_manager;
     VideoCore::RasterizerInterface* rasterizer = nullptr;

-    std::vector<u8> read_buffer;
-    std::vector<u8> write_buffer;
-    std::vector<u8> intermediate_buffer;
+    Common::ScratchBuffer<u8> read_buffer;
+    Common::ScratchBuffer<u8> write_buffer;
+    Common::ScratchBuffer<u8> intermediate_buffer;

     static constexpr std::size_t NUM_REGS = 0x800;
     struct Regs {
@@ -155,7 +155,7 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
     // swizzle pitch linear to block linear
     const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
     const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
-    luma_buffer.resize(size);
+    luma_buffer.resize_destructive(size);
     std::span<const u8> frame_buff(converted_frame_buf_addr, 4 * width * height);
     Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height,
                             block_height, 0, width * 4);
@@ -181,8 +181,8 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {

     const auto stride = static_cast<size_t>(frame->linesize[0]);

-    luma_buffer.resize(aligned_width * surface_height);
-    chroma_buffer.resize(aligned_width * surface_height / 2);
+    luma_buffer.resize_destructive(aligned_width * surface_height);
+    chroma_buffer.resize_destructive(aligned_width * surface_height / 2);

     // Populate luma buffer
     const u8* luma_src = frame->data[0];
@@ -4,8 +4,9 @@
 #pragma once

 #include <memory>
 #include <vector>

 #include "common/common_types.h"
+#include "common/scratch_buffer.h"

 struct SwsContext;

@@ -49,8 +50,8 @@ private:
     /// size does not change during a stream
     using AVMallocPtr = std::unique_ptr<u8, decltype(&av_free)>;
     AVMallocPtr converted_frame_buffer;
-    std::vector<u8> luma_buffer;
-    std::vector<u8> chroma_buffer;
+    Common::ScratchBuffer<u8> luma_buffer;
+    Common::ScratchBuffer<u8> chroma_buffer;

     GPUVAddr config_struct_address{};
     GPUVAddr output_surface_luma_address{};
@@ -39,6 +39,12 @@ TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface&
     sampler_descriptor.mipmap_filter.Assign(Tegra::Texture::TextureMipmapFilter::Linear);
     sampler_descriptor.cubemap_anisotropy.Assign(1);

+    // These values were chosen based on typical peak swizzle data sizes seen in some titles
+    static constexpr size_t SWIZZLE_DATA_BUFFER_INITIAL_CAPACITY = 8_MiB;
+    static constexpr size_t UNSWIZZLE_DATA_BUFFER_INITIAL_CAPACITY = 1_MiB;
+    swizzle_data_buffer.reserve(SWIZZLE_DATA_BUFFER_INITIAL_CAPACITY);
+    unswizzle_data_buffer.reserve(UNSWIZZLE_DATA_BUFFER_INITIAL_CAPACITY);
+
     // Make sure the first index is reserved for the null resources
     // This way the null resource becomes a compile time constant
     void(slot_images.insert(NullImageParams{}));
@@ -90,7 +96,8 @@ void TextureCache<P>::RunGarbageCollector() {
             const auto copies = FullDownloadCopies(image.info);
             image.DownloadMemory(map, copies);
             runtime.Finish();
-            SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
+            SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span,
+                         swizzle_data_buffer);
         }
         if (True(image.flags & ImageFlagBits::Tracked)) {
             UntrackImage(image, image_id);
@@ -461,7 +468,8 @@ void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) {
         const auto copies = FullDownloadCopies(image.info);
         image.DownloadMemory(map, copies);
         runtime.Finish();
-        SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
+        SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span,
+                     swizzle_data_buffer);
     }
 }

@@ -672,7 +680,8 @@ void TextureCache<P>::PopAsyncFlushes() {
     for (const ImageId image_id : download_ids) {
         const ImageBase& image = slot_images[image_id];
         const auto copies = FullDownloadCopies(image.info);
-        SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, download_span);
+        SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, download_span,
+                     swizzle_data_buffer);
         download_map.offset += image.unswizzled_size_bytes;
         download_span = download_span.subspan(image.unswizzled_size_bytes);
     }
@@ -734,13 +743,21 @@ void TextureCache<P>::UploadImageContents(Image& image, StagingBuffer& staging)
         gpu_memory->ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes());
         const auto uploads = FullUploadSwizzles(image.info);
         runtime.AccelerateImageUpload(image, staging, uploads);
-    } else if (True(image.flags & ImageFlagBits::Converted)) {
-        std::vector<u8> unswizzled_data(image.unswizzled_size_bytes);
-        auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, unswizzled_data);
-        ConvertImage(unswizzled_data, image.info, mapped_span, copies);
+        return;
+    }
+    const size_t guest_size_bytes = image.guest_size_bytes;
+    swizzle_data_buffer.resize(guest_size_bytes);
+    gpu_memory->ReadBlockUnsafe(gpu_addr, swizzle_data_buffer.data(), guest_size_bytes);
+
+    if (True(image.flags & ImageFlagBits::Converted)) {
+        unswizzle_data_buffer.resize(image.unswizzled_size_bytes);
+        auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, swizzle_data_buffer,
+                                     unswizzle_data_buffer);
+        ConvertImage(unswizzle_data_buffer, image.info, mapped_span, copies);
         image.UploadMemory(staging, copies);
     } else {
-        const auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, mapped_span);
+        const auto copies =
+            UnswizzleImage(*gpu_memory, gpu_addr, image.info, swizzle_data_buffer, mapped_span);
         image.UploadMemory(staging, copies);
     }
 }
@@ -910,7 +927,7 @@ void TextureCache<P>::InvalidateScale(Image& image) {
 }

 template <class P>
-u64 TextureCache<P>::GetScaledImageSizeBytes(ImageBase& image) {
+u64 TextureCache<P>::GetScaledImageSizeBytes(const ImageBase& image) {
     const u64 scale_up = static_cast<u64>(Settings::values.resolution_info.up_scale *
                                           Settings::values.resolution_info.up_scale);
     const u64 down_shift = static_cast<u64>(Settings::values.resolution_info.down_shift +
@@ -368,7 +368,7 @@ private:
     void InvalidateScale(Image& image);
     bool ScaleUp(Image& image);
     bool ScaleDown(Image& image);
-    u64 GetScaledImageSizeBytes(ImageBase& image);
+    u64 GetScaledImageSizeBytes(const ImageBase& image);

     Runtime& runtime;

@@ -417,6 +417,9 @@ private:

     std::unordered_map<GPUVAddr, ImageAllocId> image_allocs_table;

+    std::vector<u8> swizzle_data_buffer;
+    std::vector<u8> unswizzle_data_buffer;
+
     u64 modification_tick = 0;
     u64 frame_tick = 0;
 };
@@ -505,7 +505,7 @@ void SwizzlePitchLinearImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr

 void SwizzleBlockLinearImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr,
                              const ImageInfo& info, const BufferImageCopy& copy,
-                             std::span<const u8> input) {
+                             std::span<const u8> input, std::vector<u8>& tmp_buffer) {
     const Extent3D size = info.size;
     const LevelInfo level_info = MakeLevelInfo(info);
     const Extent2D tile_size = DefaultBlockSize(info.format);
@@ -534,8 +534,8 @@ void SwizzleBlockLinearImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr
                                               tile_size.height, info.tile_width_spacing);
     const size_t subresource_size = sizes[level];

-    const auto dst_data = std::make_unique<u8[]>(subresource_size);
-    const std::span<u8> dst(dst_data.get(), subresource_size);
+    tmp_buffer.resize(subresource_size);
+    const std::span<u8> dst(tmp_buffer);

     for (s32 layer = 0; layer < info.resources.layers; ++layer) {
         const std::span<const u8> src = input.subspan(host_offset);
@@ -765,8 +765,9 @@ bool IsValidEntry(const Tegra::MemoryManager& gpu_memory, const TICEntry& config
 }

 std::vector<BufferImageCopy> UnswizzleImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr,
-                                            const ImageInfo& info, std::span<u8> output) {
-    const size_t guest_size_bytes = CalculateGuestSizeInBytes(info);
+                                            const ImageInfo& info, std::span<const u8> input,
+                                            std::span<u8> output) {
+    const size_t guest_size_bytes = input.size_bytes();
     const u32 bpp_log2 = BytesPerBlockLog2(info.format);
     const Extent3D size = info.size;

@@ -789,10 +790,6 @@ std::vector<BufferImageCopy> UnswizzleImage(Tegra::MemoryManager& gpu_memory, GP
             .image_extent = size,
         }};
     }
-    const auto input_data = std::make_unique<u8[]>(guest_size_bytes);
-    gpu_memory.ReadBlockUnsafe(gpu_addr, input_data.get(), guest_size_bytes);
-    const std::span<const u8> input(input_data.get(), guest_size_bytes);
-
     const LevelInfo level_info = MakeLevelInfo(info);
     const s32 num_layers = info.resources.layers;
     const s32 num_levels = info.resources.levels;
@@ -980,13 +977,14 @@ std::vector<SwizzleParameters> FullUploadSwizzles(const ImageInfo& info) {
 }

 void SwizzleImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr, const ImageInfo& info,
-                  std::span<const BufferImageCopy> copies, std::span<const u8> memory) {
+                  std::span<const BufferImageCopy> copies, std::span<const u8> memory,
+                  std::vector<u8>& tmp_buffer) {
     const bool is_pitch_linear = info.type == ImageType::Linear;
     for (const BufferImageCopy& copy : copies) {
         if (is_pitch_linear) {
             SwizzlePitchLinearImage(gpu_memory, gpu_addr, info, copy, memory);
         } else {
-            SwizzleBlockLinearImage(gpu_memory, gpu_addr, info, copy, memory);
+            SwizzleBlockLinearImage(gpu_memory, gpu_addr, info, copy, memory, tmp_buffer);
         }
     }
 }
@@ -59,6 +59,7 @@ struct OverlapResult {

 [[nodiscard]] std::vector<BufferImageCopy> UnswizzleImage(Tegra::MemoryManager& gpu_memory,
                                                           GPUVAddr gpu_addr, const ImageInfo& info,
+                                                          std::span<const u8> input,
                                                           std::span<u8> output);

 [[nodiscard]] BufferCopy UploadBufferCopy(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr,
@@ -76,7 +77,8 @@ void ConvertImage(std::span<const u8> input, const ImageInfo& info, std::span<u8
 [[nodiscard]] std::vector<SwizzleParameters> FullUploadSwizzles(const ImageInfo& info);

 void SwizzleImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr, const ImageInfo& info,
-                  std::span<const BufferImageCopy> copies, std::span<const u8> memory);
+                  std::span<const BufferImageCopy> copies, std::span<const u8> memory,
+                  std::vector<u8>& tmp_buffer);

 [[nodiscard]] bool IsBlockLinearSizeCompatible(const ImageInfo& new_info,
                                                const ImageInfo& overlap_info, u32 new_level,