early-access version 2401

pineappleEA 2022-01-16 08:10:16 +01:00
parent 29316c5ff1
commit 0e51e385d9
20 changed files with 251 additions and 44 deletions

View File

@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
-This is the source code for early-access 2400.
+This is the source code for early-access 2401.
 ## Legal Notice

View File

@@ -78,8 +78,9 @@ class BufferCache {
     static constexpr BufferId NULL_BUFFER_ID{0};
-    static constexpr u64 EXPECTED_MEMORY = 512_MiB;
-    static constexpr u64 CRITICAL_MEMORY = 1_GiB;
+    static constexpr s64 DEFAULT_EXPECTED_MEMORY = 512_MiB;
+    static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB;
+    static constexpr s64 TARGET_THRESHOLD = 4_GiB;
     using Maxwell = Tegra::Engines::Maxwell3D::Regs;
@@ -436,6 +437,8 @@ private:
     Common::LeastRecentlyUsedCache<LRUItemParams> lru_cache;
     u64 frame_tick = 0;
     u64 total_used_memory = 0;
+    u64 minimum_memory = 0;
+    u64 critical_memory = 0;
     std::array<BufferId, ((1ULL << 39) >> PAGE_BITS)> page_table;
 };
@@ -451,11 +454,30 @@ BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
     // Ensure the first slot is used for the null buffer
     void(slot_buffers.insert(runtime, NullBufferParams{}));
     common_ranges.clear();
+    if (!runtime.CanReportMemoryUsage()) {
+        minimum_memory = DEFAULT_EXPECTED_MEMORY;
+        critical_memory = DEFAULT_CRITICAL_MEMORY;
+        return;
+    }
+    const s64 device_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
+    const s64 min_spacing_expected = device_memory - 1_GiB - 512_MiB;
+    const s64 min_spacing_critical = device_memory - 1_GiB;
+    const s64 mem_threshold = std::min(device_memory, TARGET_THRESHOLD);
+    const s64 min_vacancy_expected = (6 * mem_threshold) / 10;
+    const s64 min_vacancy_critical = (3 * mem_threshold) / 10;
+    minimum_memory = static_cast<u64>(
+        std::max(std::min(device_memory - min_vacancy_expected, min_spacing_expected),
+                 DEFAULT_EXPECTED_MEMORY));
+    critical_memory = static_cast<u64>(
+        std::max(std::min(device_memory - min_vacancy_critical, min_spacing_critical),
+                 DEFAULT_CRITICAL_MEMORY));
 }
 template <class P>
 void BufferCache<P>::RunGarbageCollector() {
-    const bool aggressive_gc = total_used_memory >= CRITICAL_MEMORY;
+    const bool aggressive_gc = total_used_memory >= critical_memory;
     const u64 ticks_to_destroy = aggressive_gc ? 60 : 120;
     int num_iterations = aggressive_gc ? 64 : 32;
     const auto clean_up = [this, &num_iterations](BufferId buffer_id) {
@@ -486,7 +508,11 @@ void BufferCache<P>::TickFrame() {
     const bool skip_preferred = hits * 256 < shots * 251;
     uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0;
-    if (total_used_memory >= EXPECTED_MEMORY) {
+    // If we can obtain the memory info, use it instead of the estimate.
+    if (runtime.CanReportMemoryUsage()) {
+        total_used_memory = runtime.GetDeviceMemoryUsage();
+    }
+    if (total_used_memory >= minimum_memory) {
         RunGarbageCollector();
     }
     ++frame_tick;
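To see what these thresholds work out to in practice, here is a small, self-contained sketch that re-runs the constructor's arithmetic for a hypothetical 8 GiB card; the 8 GiB figure and the helper names are assumptions for illustration, not values taken from the commit.

// Hedged sketch: recomputes the buffer cache thresholds above for an assumed
// 8 GiB device; build with any C++17 compiler and run it to see the numbers.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    constexpr int64_t MiB = 1024LL * 1024;
    constexpr int64_t GiB = 1024 * MiB;
    constexpr int64_t device_memory = 8 * GiB;    // assumed VRAM size
    constexpr int64_t target_threshold = 4 * GiB; // TARGET_THRESHOLD
    const int64_t min_spacing_expected = device_memory - 1 * GiB - 512 * MiB;
    const int64_t min_spacing_critical = device_memory - 1 * GiB;
    const int64_t mem_threshold = std::min(device_memory, target_threshold);
    const int64_t min_vacancy_expected = (6 * mem_threshold) / 10;
    const int64_t min_vacancy_critical = (3 * mem_threshold) / 10;
    const int64_t minimum_memory = std::max(
        std::min(device_memory - min_vacancy_expected, min_spacing_expected), 512 * MiB);
    const int64_t critical_memory = std::max(
        std::min(device_memory - min_vacancy_critical, min_spacing_critical), 1 * GiB);
    // Prints roughly 5734 MiB and 6963 MiB: the GC starts running once buffers
    // use ~5.6 GiB and becomes aggressive at ~6.8 GiB on this hypothetical card.
    std::printf("minimum=%lld MiB critical=%lld MiB\n",
                static_cast<long long>(minimum_memory / MiB),
                static_cast<long long>(critical_memory / MiB));
    return 0;
}

On cards at or below 2 GiB the std::max floors win, so the values collapse back to the old 512_MiB / 1_GiB defaults.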

View File

@@ -513,7 +513,7 @@ struct GPU::Impl {
         ProcessFenceActionMethod();
         break;
     case BufferMethods::WaitForInterrupt:
-        ProcessWaitForInterruptMethod();
+        rasterizer->WaitForIdle();
         break;
     case BufferMethods::SemaphoreTrigger: {
         ProcessSemaphoreTriggerMethod();

View File

@@ -135,6 +135,24 @@ BufferCacheRuntime::BufferCacheRuntime(const Device& device_)
         buffer.Create();
         glNamedBufferData(buffer.handle, 0x10'000, nullptr, GL_STREAM_COPY);
     }
+    device_access_memory = []() -> u64 {
+        if (GLAD_GL_NVX_gpu_memory_info) {
+            GLint cur_avail_mem_kb = 0;
+            glGetIntegerv(GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX, &cur_avail_mem_kb);
+            return static_cast<u64>(cur_avail_mem_kb) * 1_KiB;
+        }
+        return 2_GiB; // Return minimum requirements
+    }();
 }
+u64 BufferCacheRuntime::GetDeviceMemoryUsage() const {
+    if (GLAD_GL_NVX_gpu_memory_info) {
+        GLint cur_avail_mem_kb = 0;
+        glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &cur_avail_mem_kb);
+        return device_access_memory - static_cast<u64>(cur_avail_mem_kb) * 1_KiB;
+    }
+    return 2_GiB;
+}
 void BufferCacheRuntime::CopyBuffer(Buffer& dst_buffer, Buffer& src_buffer,

View File

@@ -153,6 +153,16 @@ public:
         use_storage_buffers = use_storage_buffers_;
     }
+    u64 GetDeviceLocalMemory() const {
+        return device_access_memory;
+    }
+    u64 GetDeviceMemoryUsage() const;
+    bool CanReportMemoryUsage() const {
+        return GLAD_GL_NVX_gpu_memory_info;
+    }
 private:
     static constexpr std::array PABO_LUT{
         GL_VERTEX_PROGRAM_PARAMETER_BUFFER_NV, GL_TESS_CONTROL_PROGRAM_PARAMETER_BUFFER_NV,
@@ -186,6 +196,8 @@ private:
     std::array<OGLBuffer, VideoCommon::NUM_COMPUTE_UNIFORM_BUFFERS> copy_compute_uniforms;
     u32 index_buffer_offset = 0;
+    u64 device_access_memory;
 };
 struct BufferCacheParams {

View File

@@ -484,6 +484,15 @@ TextureCacheRuntime::TextureCacheRuntime(const Device& device_, ProgramManager&
             rescale_read_fbos[i].Create();
         }
     }
+    device_access_memory = []() -> u64 {
+        if (GLAD_GL_NVX_gpu_memory_info) {
+            GLint cur_avail_mem_kb = 0;
+            glGetIntegerv(GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX, &cur_avail_mem_kb);
+            return static_cast<u64>(cur_avail_mem_kb) * 1_KiB;
+        }
+        return 2_GiB; // Return minimum requirements
+    }();
 }
 TextureCacheRuntime::~TextureCacheRuntime() = default;
@@ -500,13 +509,13 @@ ImageBufferMap TextureCacheRuntime::DownloadStagingBuffer(size_t size) {
     return download_buffers.RequestMap(size, false);
 }
-u64 TextureCacheRuntime::GetDeviceLocalMemory() const {
+u64 TextureCacheRuntime::GetDeviceMemoryUsage() const {
     if (GLAD_GL_NVX_gpu_memory_info) {
         GLint cur_avail_mem_kb = 0;
         glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &cur_avail_mem_kb);
-        return static_cast<u64>(cur_avail_mem_kb) * 1_KiB;
+        return device_access_memory - static_cast<u64>(cur_avail_mem_kb) * 1_KiB;
     }
-    return 2_GiB; // Return minimum requirements
+    return 2_GiB;
 }
 void TextureCacheRuntime::CopyImage(Image& dst_image, Image& src_image,
@@ -686,6 +695,7 @@ Image::Image(TextureCacheRuntime& runtime_, const VideoCommon::ImageInfo& info_,
     }
     if (IsConverted(runtime->device, info.format, info.type)) {
         flags |= ImageFlagBits::Converted;
+        flags |= ImageFlagBits::GCProtected;
         gl_internal_format = IsPixelFormatSRGB(info.format) ? GL_SRGB8_ALPHA8 : GL_RGBA8;
         gl_format = GL_RGBA;
         gl_type = GL_UNSIGNED_INT_8_8_8_8_REV;

View File

@@ -83,7 +83,15 @@ public:
     ImageBufferMap DownloadStagingBuffer(size_t size);
-    u64 GetDeviceLocalMemory() const;
+    u64 GetDeviceLocalMemory() const {
+        return device_access_memory;
+    }
+    u64 GetDeviceMemoryUsage() const;
+    bool CanReportMemoryUsage() const {
+        return GLAD_GL_NVX_gpu_memory_info;
+    }
     bool ShouldReinterpret([[maybe_unused]] Image& dst, [[maybe_unused]] Image& src) {
         return true;
@@ -172,6 +180,7 @@ private:
     std::array<OGLFramebuffer, 4> rescale_draw_fbos;
     std::array<OGLFramebuffer, 4> rescale_read_fbos;
     const Settings::ResolutionScalingInfo& resolution;
+    u64 device_access_memory;
 };
 class Image : public VideoCommon::ImageBase {

View File

@@ -141,6 +141,18 @@ StagingBufferRef BufferCacheRuntime::DownloadStagingBuffer(size_t size) {
     return staging_pool.Request(size, MemoryUsage::Download);
 }
+u64 BufferCacheRuntime::GetDeviceLocalMemory() const {
+    return device.GetDeviceLocalMemory();
+}
+u64 BufferCacheRuntime::GetDeviceMemoryUsage() const {
+    return device.GetDeviceMemoryUsage();
+}
+bool BufferCacheRuntime::CanReportMemoryUsage() const {
+    return device.CanReportMemoryUsage();
+}
 void BufferCacheRuntime::Finish() {
     scheduler.Finish();
 }

View File

@@ -65,6 +65,12 @@ public:
     void Finish();
+    u64 GetDeviceLocalMemory() const;
+    u64 GetDeviceMemoryUsage() const;
+    bool CanReportMemoryUsage() const;
     [[nodiscard]] StagingBufferRef UploadStagingBuffer(size_t size);
     [[nodiscard]] StagingBufferRef DownloadStagingBuffer(size_t size);

View File

@@ -118,7 +118,7 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem
         .image = nullptr,
         .buffer = *stream_buffer,
     };
-    const auto memory_properties = device.GetPhysical().GetMemoryProperties();
+    const auto memory_properties = device.GetPhysical().GetMemoryProperties().memoryProperties;
     VkMemoryAllocateInfo stream_memory_info{
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .pNext = make_dedicated ? &dedicated_info : nullptr,

View File

@@ -1189,6 +1189,14 @@ u64 TextureCacheRuntime::GetDeviceLocalMemory() const {
     return device.GetDeviceLocalMemory();
 }
+u64 TextureCacheRuntime::GetDeviceMemoryUsage() const {
+    return device.GetDeviceMemoryUsage();
+}
+bool TextureCacheRuntime::CanReportMemoryUsage() const {
+    return device.CanReportMemoryUsage();
+}
 void TextureCacheRuntime::TickFrame() {}
 Image::Image(TextureCacheRuntime& runtime_, const ImageInfo& info_, GPUVAddr gpu_addr_,
@@ -1203,6 +1211,7 @@ Image::Image(TextureCacheRuntime& runtime_, const ImageInfo& info_, GPUVAddr gpu
         } else {
             flags |= VideoCommon::ImageFlagBits::Converted;
         }
+        flags |= VideoCommon::ImageFlagBits::GCProtected;
     }
     if (runtime->device.HasDebuggingToolAttached()) {
         original_image.SetObjectNameEXT(VideoCommon::Name(*this).c_str());

View File

@@ -55,6 +55,10 @@ public:
     u64 GetDeviceLocalMemory() const;
+    u64 GetDeviceMemoryUsage() const;
+    bool CanReportMemoryUsage() const;
     void BlitImage(Framebuffer* dst_framebuffer, ImageView& dst, ImageView& src,
                    const Region2D& dst_region, const Region2D& src_region,
                    Tegra::Engines::Fermi2D::Filter filter,

View File

@@ -29,15 +29,16 @@ enum class ImageFlagBits : u32 {
     Sparse = 1 << 9,       ///< Image has non-contiguous submemory.
     // Garbage Collection Flags
     BadOverlap = 1 << 10,  ///< This image overlaps others but doesn't fit; has higher
                            ///< garbage collection priority
     Alias = 1 << 11,       ///< This image has aliases and has priority on garbage
                            ///< collection
+    GCProtected = 1 << 12, ///< Protected from low-tier GC, as these images are costly to load back.
     // Rescaler
-    Rescaled = 1 << 12,
-    CheckingRescalable = 1 << 13,
-    IsRescalable = 1 << 14,
+    Rescaled = 1 << 13,
+    CheckingRescalable = 1 << 14,
+    IsRescalable = 1 << 15,
 };
 DECLARE_ENUM_FLAG_OPERATORS(ImageFlagBits)
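The new GCProtected bit is consumed through yuzu's enum-flag helpers (the flags |= ... assignments and True(flags & ...) checks visible in the texture cache hunks below). As a quick illustration, here is a minimal stand-alone re-creation of that pattern; the operators and the True() helper are local stand-ins written for this sketch, not the project's actual common headers.

// Hedged sketch of the enum-flag pattern used with ImageFlagBits; the helpers
// below are illustrative stand-ins, not yuzu's real DECLARE_ENUM_FLAG_OPERATORS.
#include <cstdint>

enum class ImageFlagBits : uint32_t {
    GCProtected = 1 << 12, // costly-to-reload images skipped by low-tier GC
    Rescaled = 1 << 13,    // note: shifted up one bit to make room for GCProtected
};

constexpr ImageFlagBits operator|(ImageFlagBits a, ImageFlagBits b) {
    return static_cast<ImageFlagBits>(static_cast<uint32_t>(a) | static_cast<uint32_t>(b));
}
constexpr ImageFlagBits operator&(ImageFlagBits a, ImageFlagBits b) {
    return static_cast<ImageFlagBits>(static_cast<uint32_t>(a) & static_cast<uint32_t>(b));
}
constexpr ImageFlagBits& operator|=(ImageFlagBits& a, ImageFlagBits b) {
    return a = a | b;
}
constexpr bool True(ImageFlagBits f) {
    return static_cast<uint32_t>(f) != 0;
}

int main() {
    ImageFlagBits flags{};
    flags |= ImageFlagBits::GCProtected; // what the Image constructors now do for converted images
    return True(flags & ImageFlagBits::GCProtected) ? 0 : 1; // exits 0: the bit is set
}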

View File

@@ -50,14 +50,20 @@ TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface&
     void(slot_samplers.insert(runtime, sampler_descriptor));
     if constexpr (HAS_DEVICE_MEMORY_INFO) {
-        const auto device_memory = runtime.GetDeviceLocalMemory();
-        const u64 possible_expected_memory = (device_memory * 4) / 10;
-        const u64 possible_critical_memory = (device_memory * 7) / 10;
-        expected_memory = std::max(possible_expected_memory, DEFAULT_EXPECTED_MEMORY - 256_MiB);
-        critical_memory = std::max(possible_critical_memory, DEFAULT_CRITICAL_MEMORY - 512_MiB);
-        minimum_memory = 0;
+        const s64 device_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
+        const s64 min_spacing_expected = device_memory - 1_GiB - 512_MiB;
+        const s64 min_spacing_critical = device_memory - 1_GiB;
+        const s64 mem_threshold = std::min(device_memory, TARGET_THRESHOLD);
+        const s64 min_vacancy_expected = (6 * mem_threshold) / 10;
+        const s64 min_vacancy_critical = (3 * mem_threshold) / 10;
+        expected_memory = static_cast<u64>(
+            std::max(std::min(device_memory - min_vacancy_expected, min_spacing_expected),
+                     DEFAULT_EXPECTED_MEMORY));
+        critical_memory = static_cast<u64>(
+            std::max(std::min(device_memory - min_vacancy_critical, min_spacing_critical),
+                     DEFAULT_CRITICAL_MEMORY));
+        minimum_memory = static_cast<u64>((device_memory - mem_threshold) / 2);
     } else {
+        // On OpenGL we can be more conservative, as the driver takes care of it.
         expected_memory = DEFAULT_EXPECTED_MEMORY + 512_MiB;
         critical_memory = DEFAULT_CRITICAL_MEMORY + 1_GiB;
         minimum_memory = 0;
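For a concrete feel of the three tiers, here is a hedged compile-time check using an assumed 8 GiB device (the size is an illustration, not a value from the commit): minimum_memory lands at 2 GiB, expected_memory at roughly 5.6 GiB, and critical_memory at roughly 6.8 GiB, which are the points where the regular, high-priority, and aggressive passes of RunGarbageCollector() kick in.

// Hedged sketch: the texture cache tiers above, evaluated for an assumed 8 GiB card.
#include <algorithm>
#include <cstdint>

constexpr int64_t MiB = 1024LL * 1024;
constexpr int64_t GiB = 1024 * MiB;
constexpr int64_t device_memory = 8 * GiB;                          // assumption
constexpr int64_t mem_threshold = std::min(device_memory, 4 * GiB); // TARGET_THRESHOLD
constexpr int64_t minimum_memory = (device_memory - mem_threshold) / 2; // 2 GiB
constexpr int64_t expected_memory = std::max(
    std::min(device_memory - (6 * mem_threshold) / 10, device_memory - GiB - 512 * MiB),
    GiB + 125 * MiB); // ~5.6 GiB (DEFAULT_EXPECTED_MEMORY is the floor)
constexpr int64_t critical_memory = std::max(
    std::min(device_memory - (3 * mem_threshold) / 10, device_memory - GiB),
    GiB + 625 * MiB); // ~6.8 GiB (DEFAULT_CRITICAL_MEMORY is the floor)
static_assert(minimum_memory < expected_memory && expected_memory < critical_memory,
              "the GC tiers must be ordered");

On a 2 GiB device the same formula degenerates to minimum_memory = 0 with both std::max floors winning, so behaviour falls back to the DEFAULT_* constants.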
@@ -66,17 +72,19 @@ TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface&
 template <class P>
 void TextureCache<P>::RunGarbageCollector() {
-    const bool high_priority_mode = total_used_memory >= expected_memory;
-    const bool aggressive_mode = total_used_memory >= critical_memory;
-    const u64 ticks_to_destroy = aggressive_mode ? 10ULL : high_priority_mode ? 25ULL : 100ULL;
-    size_t num_iterations = aggressive_mode ? 300 : (high_priority_mode ? 50 : 10);
-    const auto clean_up = [this, &num_iterations, high_priority_mode](ImageId image_id) {
+    bool high_priority_mode = total_used_memory >= expected_memory;
+    bool aggressive_mode = total_used_memory >= critical_memory;
+    const u64 ticks_to_destroy = aggressive_mode ? 10ULL : high_priority_mode ? 25ULL : 50ULL;
+    size_t num_iterations = aggressive_mode ? 40 : (high_priority_mode ? 20 : 10);
+    const auto clean_up = [this, &num_iterations, &high_priority_mode,
+                           &aggressive_mode](ImageId image_id) {
         if (num_iterations == 0) {
             return true;
         }
         --num_iterations;
         auto& image = slot_images[image_id];
-        const bool must_download = image.IsSafeDownload();
+        const bool must_download =
+            image.IsSafeDownload() && False(image.flags & ImageFlagBits::BadOverlap);
         if (!high_priority_mode && must_download) {
             return false;
         }
@@ -92,6 +100,18 @@ void TextureCache<P>::RunGarbageCollector() {
         }
         UnregisterImage(image_id);
         DeleteImage(image_id, image.scale_tick > frame_tick + 5);
+        if (total_used_memory < critical_memory) {
+            if (aggressive_mode) {
+                // Sink the aggressiveness.
+                num_iterations >>= 2;
+                aggressive_mode = false;
+                return false;
+            }
+            if (high_priority_mode && total_used_memory < expected_memory) {
+                num_iterations >>= 1;
+                high_priority_mode = false;
+            }
+        }
         return false;
     };
     lru_cache.ForEachItemBelow(frame_tick - ticks_to_destroy, clean_up);
@@ -99,6 +119,10 @@ void TextureCache<P>::RunGarbageCollector() {
 template <class P>
 void TextureCache<P>::TickFrame() {
+    // If we can obtain the memory info, use it instead of the estimate.
+    if (runtime.CanReportMemoryUsage()) {
+        total_used_memory = runtime.GetDeviceMemoryUsage();
+    }
     if (total_used_memory > minimum_memory) {
         RunGarbageCollector();
     }
@@ -106,6 +130,7 @@ void TextureCache<P>::TickFrame() {
     sentenced_framebuffers.Tick();
     sentenced_image_view.Tick();
     runtime.TickFrame();
+    critical_gc = 0;
     ++frame_tick;
 }
@@ -1052,6 +1077,11 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
     for (const ImageId overlap_id : overlap_ids) {
         Image& overlap = slot_images[overlap_id];
+        if (True(overlap.flags & ImageFlagBits::GpuModified)) {
+            new_image.flags |= ImageFlagBits::GpuModified;
+            new_image.modification_tick =
+                std::max(overlap.modification_tick, new_image.modification_tick);
+        }
         if (overlap.info.num_samples != new_image.info.num_samples) {
             LOG_WARNING(HW_GPU, "Copying between images with different samples is not implemented");
         } else {
@@ -1414,6 +1444,10 @@ void TextureCache<P>::RegisterImage(ImageId image_id) {
         tentative_size = EstimatedDecompressedSize(tentative_size, image.info.format);
     }
     total_used_memory += Common::AlignUp(tentative_size, 1024);
+    if (total_used_memory > critical_memory && critical_gc < GC_EMERGENCY_COUNTS) {
+        RunGarbageCollector();
+        critical_gc++;
+    }
     image.lru_index = lru_cache.Insert(image_id, frame_tick);
     ForEachGPUPage(image.gpu_addr, image.guest_size_bytes,
@@ -1704,6 +1738,9 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
             most_recent_tick = std::max(most_recent_tick, aliased_image.modification_tick);
             aliased_images.push_back(&aliased);
             any_rescaled |= True(aliased_image.flags & ImageFlagBits::Rescaled);
+            if (True(aliased_image.flags & ImageFlagBits::GpuModified)) {
+                image.flags |= ImageFlagBits::GpuModified;
+            }
         }
     }
     if (aliased_images.empty()) {

View File

@@ -60,8 +60,10 @@ class TextureCache {
     /// True when the API can provide info about the memory of the device.
     static constexpr bool HAS_DEVICE_MEMORY_INFO = P::HAS_DEVICE_MEMORY_INFO;
-    static constexpr u64 DEFAULT_EXPECTED_MEMORY = 1_GiB;
-    static constexpr u64 DEFAULT_CRITICAL_MEMORY = 2_GiB;
+    static constexpr s64 TARGET_THRESHOLD = 4_GiB;
+    static constexpr s64 DEFAULT_EXPECTED_MEMORY = 1_GiB + 125_MiB;
+    static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB + 625_MiB;
+    static constexpr size_t GC_EMERGENCY_COUNTS = 2;
     using Runtime = typename P::Runtime;
     using Image = typename P::Image;
@@ -373,6 +375,7 @@ private:
     u64 minimum_memory;
     u64 expected_memory;
     u64 critical_memory;
+    size_t critical_gc;
     SlotVector<Image> slot_images;
     SlotVector<ImageMapView> slot_map_views;

View File

@@ -13,12 +13,14 @@
 #include <vector>
 #include "common/assert.h"
+#include "common/literals.h"
 #include "common/settings.h"
 #include "video_core/vulkan_common/nsight_aftermath_tracker.h"
 #include "video_core/vulkan_common/vulkan_device.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 namespace Vulkan {
+using namespace Common::Literals;
 namespace {
 namespace Alternatives {
 constexpr std::array STENCIL8_UINT{
@@ -585,6 +587,11 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
     }
     logical = vk::Device::Create(physical, queue_cis, extensions, first_next, dld);
+    is_integrated = properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
+    is_virtual = properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
+    is_non_gpu = properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_OTHER ||
+                 properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_CPU;
     CollectPhysicalMemoryInfo();
     CollectTelemetryParameters();
     CollectToolingInfo();
@@ -957,6 +964,7 @@ std::vector<const char*> Device::LoadExtensions(bool requires_surface) {
     test(has_khr_swapchain_mutable_format, VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME,
          false);
     test(has_ext_line_rasterization, VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME, false);
+    test(ext_memory_budget, VK_EXT_MEMORY_BUDGET_EXTENSION_NAME, true);
     if (Settings::values.enable_nsight_aftermath) {
         test(nv_device_diagnostics_config, VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME,
              true);
@@ -969,7 +977,7 @@ std::vector<const char*> Device::LoadExtensions(bool requires_surface) {
     VkPhysicalDeviceFeatures2KHR features{};
     features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR;
-    VkPhysicalDeviceProperties2KHR physical_properties;
+    VkPhysicalDeviceProperties2KHR physical_properties{};
     physical_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR;
     if (has_khr_shader_float16_int8) {
@@ -1239,15 +1247,51 @@ void Device::CollectTelemetryParameters() {
     vendor_name = driver.driverName;
 }
+u64 Device::GetDeviceMemoryUsage() const {
+    VkPhysicalDeviceMemoryBudgetPropertiesEXT budget;
+    budget.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT;
+    budget.pNext = nullptr;
+    physical.GetMemoryProperties(&budget);
+    u64 result{};
+    for (const size_t heap : valid_heap_memory) {
+        result += budget.heapUsage[heap];
+    }
+    return result;
+}
 void Device::CollectPhysicalMemoryInfo() {
-    const auto mem_properties = physical.GetMemoryProperties();
+    VkPhysicalDeviceMemoryBudgetPropertiesEXT budget{};
+    budget.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT;
+    const auto mem_info = physical.GetMemoryProperties(ext_memory_budget ? &budget : nullptr);
+    const auto& mem_properties = mem_info.memoryProperties;
     const size_t num_properties = mem_properties.memoryHeapCount;
     device_access_memory = 0;
+    u64 device_initial_usage = 0;
+    u64 local_memory = 0;
     for (size_t element = 0; element < num_properties; ++element) {
-        if ((mem_properties.memoryHeaps[element].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0) {
-            device_access_memory += mem_properties.memoryHeaps[element].size;
+        const bool is_heap_local =
+            (mem_properties.memoryHeaps[element].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
+        if (!is_integrated && !is_heap_local) {
+            continue;
         }
+        valid_heap_memory.push_back(element);
+        if (is_heap_local) {
+            local_memory += mem_properties.memoryHeaps[element].size;
+        }
+        if (ext_memory_budget) {
+            device_initial_usage += budget.heapUsage[element];
+            device_access_memory += budget.heapBudget[element];
+            continue;
+        }
+        device_access_memory += mem_properties.memoryHeaps[element].size;
     }
+    if (!is_integrated) {
+        return;
+    }
+    const s64 available_memory = static_cast<s64>(device_access_memory - device_initial_usage);
+    device_access_memory = static_cast<u64>(std::max<s64>(
+        std::min<s64>(available_memory - 8_GiB, 4_GiB), static_cast<s64>(local_memory)));
+    device_initial_usage = 0;
 }
 void Device::CollectToolingInfo() {
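For readers unfamiliar with VK_EXT_memory_budget, here is a minimal, self-contained sketch of the raw query that the vk::PhysicalDevice wrapper above performs; the function name and the sum-over-all-heaps policy are illustrative choices for this sketch, not code from the commit.

// Hedged sketch: raw VK_EXT_memory_budget query roughly equivalent to
// Device::GetDeviceMemoryUsage() above (no vk::* wrappers, no heap filtering).
#include <cstdint>
#include <vulkan/vulkan.h>

// Returns the driver-reported usage summed over every memory heap, assuming
// the caller has already verified that VK_EXT_memory_budget is enabled.
uint64_t QueryTotalHeapUsage(VkPhysicalDevice physical_device) {
    VkPhysicalDeviceMemoryBudgetPropertiesEXT budget{};
    budget.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT;

    VkPhysicalDeviceMemoryProperties2 properties{};
    properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2;
    properties.pNext = &budget; // chain the budget struct into the query
    vkGetPhysicalDeviceMemoryProperties2(physical_device, &properties);

    uint64_t usage = 0;
    for (uint32_t heap = 0; heap < properties.memoryProperties.memoryHeapCount; ++heap) {
        usage += budget.heapUsage[heap];
    }
    return usage;
}

The commit's GetDeviceMemoryUsage() differs only in that it sums just the heaps recorded in valid_heap_memory: device-local heaps on discrete GPUs, and all heaps on integrated GPUs.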

View File

@@ -342,6 +342,12 @@ public:
         return device_access_memory;
     }
+    bool CanReportMemoryUsage() const {
+        return ext_memory_budget;
+    }
+    u64 GetDeviceMemoryUsage() const;
     u32 GetSetsPerPool() const {
         return sets_per_pool;
     }
@@ -418,6 +424,9 @@ private:
     bool is_topology_list_restart_supported{}; ///< Support for primitive restart with list
                                                ///< topologies.
     bool is_patch_list_restart_supported{};    ///< Support for primitive restart with list patch.
+    bool is_integrated{};                      ///< Is the GPU an iGPU.
+    bool is_virtual{};                         ///< Is the GPU a virtual GPU.
+    bool is_non_gpu{};                         ///< Is a software rasterizer, FPGA, or other non-GPU device.
     bool nv_viewport_swizzle{};                ///< Support for VK_NV_viewport_swizzle.
     bool nv_viewport_array2{};                 ///< Support for VK_NV_viewport_array2.
     bool nv_geometry_shader_passthrough{};     ///< Support for VK_NV_geometry_shader_passthrough.
@@ -442,6 +451,7 @@ private:
     bool ext_shader_atomic_int64{};            ///< Support for VK_KHR_shader_atomic_int64.
     bool ext_conservative_rasterization{};     ///< Support for VK_EXT_conservative_rasterization.
     bool ext_provoking_vertex{};               ///< Support for VK_EXT_provoking_vertex.
+    bool ext_memory_budget{};                  ///< Support for VK_EXT_memory_budget.
     bool nv_device_diagnostics_config{};       ///< Support for VK_NV_device_diagnostics_config.
     bool has_broken_cube_compatibility{};      ///< Has broken cube compatibility bit
     bool has_renderdoc{};                      ///< Has RenderDoc attached
@@ -452,6 +462,7 @@ private:
     // Telemetry parameters
     std::string vendor_name;                       ///< Device's driver name.
     std::vector<std::string> supported_extensions; ///< Reported Vulkan extensions.
+    std::vector<size_t> valid_heap_memory;         ///< Heaps used.
     /// Format properties dictionary.
     std::unordered_map<VkFormat, VkFormatProperties> format_properties;

View File

@@ -227,7 +227,7 @@ void MemoryCommit::Release() {
 }
 MemoryAllocator::MemoryAllocator(const Device& device_, bool export_allocations_)
-    : device{device_}, properties{device_.GetPhysical().GetMemoryProperties()},
+    : device{device_}, properties{device_.GetPhysical().GetMemoryProperties().memoryProperties},
       export_allocations{export_allocations_},
       buffer_image_granularity{
           device_.GetPhysical().GetProperties().limits.bufferImageGranularity} {}

View File

@@ -239,8 +239,8 @@ bool Load(VkInstance instance, InstanceDispatch& dld) noexcept {
     return X(vkCreateDevice) && X(vkDestroyDevice) && X(vkDestroyDevice) &&
            X(vkEnumerateDeviceExtensionProperties) && X(vkEnumeratePhysicalDevices) &&
            X(vkGetDeviceProcAddr) && X(vkGetPhysicalDeviceFormatProperties) &&
-           X(vkGetPhysicalDeviceMemoryProperties) && X(vkGetPhysicalDeviceProperties) &&
-           X(vkGetPhysicalDeviceQueueFamilyProperties);
+           X(vkGetPhysicalDeviceMemoryProperties) && X(vkGetPhysicalDeviceMemoryProperties2) &&
+           X(vkGetPhysicalDeviceProperties) && X(vkGetPhysicalDeviceQueueFamilyProperties);
 #undef X
 }
@@ -928,9 +928,12 @@ std::vector<VkPresentModeKHR> PhysicalDevice::GetSurfacePresentModesKHR(
     return modes;
 }
-VkPhysicalDeviceMemoryProperties PhysicalDevice::GetMemoryProperties() const noexcept {
-    VkPhysicalDeviceMemoryProperties properties;
-    dld->vkGetPhysicalDeviceMemoryProperties(physical_device, &properties);
+VkPhysicalDeviceMemoryProperties2 PhysicalDevice::GetMemoryProperties(
+    void* next_structures) const noexcept {
+    VkPhysicalDeviceMemoryProperties2 properties{};
+    properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2;
+    properties.pNext = next_structures;
+    dld->vkGetPhysicalDeviceMemoryProperties2(physical_device, &properties);
     return properties;
 }

View File

@@ -173,6 +173,7 @@ struct InstanceDispatch {
     PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR{};
     PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties{};
     PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties{};
+    PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2{};
     PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties{};
     PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR{};
     PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties{};
@@ -951,7 +952,8 @@ public:
     std::vector<VkPresentModeKHR> GetSurfacePresentModesKHR(VkSurfaceKHR) const;
-    VkPhysicalDeviceMemoryProperties GetMemoryProperties() const noexcept;
+    VkPhysicalDeviceMemoryProperties2 GetMemoryProperties(
+        void* next_structures = nullptr) const noexcept;
 private:
     VkPhysicalDevice physical_device = nullptr;