early-access version 2606

pineappleEA
2022-03-25 07:21:10 +01:00
parent c56a867536
commit b3c57a4768
15 changed files with 686 additions and 308 deletions

src/core/memory.cpp

@@ -322,7 +322,7 @@ struct Memory::Impl {
         }
         if (Settings::IsFastmemEnabled()) {
-            const bool is_read_enable = Settings::IsGPULevelHigh() || !cached;
+            const bool is_read_enable = !Settings::IsGPULevelExtreme() || !cached;
             system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
         }
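
Note: a hedged reading of the change above, using an illustrative helper (IsReadEnabled is not part of the commit): read access through the fastmem arena is now withdrawn only when the GPU accuracy level is Extreme and the page is GPU-cached, instead of requiring High accuracy to keep reads enabled; write access to cached pages is still trapped via the separate !cached argument to Protect.

    constexpr bool IsReadEnabled(bool is_extreme_accuracy, bool cached) {
        return !is_extreme_accuracy || !cached;
    }
    static_assert(IsReadEnabled(false, true));  // Normal/High accuracy: cached reads stay fast
    static_assert(!IsReadEnabled(true, true));  // Extreme accuracy + cached page: reads fault
    static_assert(IsReadEnabled(true, false));  // Pages the GPU does not cache stay readable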

src/video_core/renderer_opengl/gl_buffer_cache.cpp

@@ -136,21 +136,17 @@ BufferCacheRuntime::BufferCacheRuntime(const Device& device_)
         glNamedBufferData(buffer.handle, 0x10'000, nullptr, GL_STREAM_COPY);
     }
-    device_access_memory = []() -> u64 {
-        if (GLAD_GL_NVX_gpu_memory_info) {
-            GLint cur_avail_mem_kb = 0;
-            glGetIntegerv(GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX, &cur_avail_mem_kb);
-            return static_cast<u64>(cur_avail_mem_kb) * 1_KiB;
+    device_access_memory = [this]() -> u64 {
+        if (device.CanReportMemoryUsage()) {
+            return device.GetCurrentDedicatedVideoMemory() + 512_MiB;
         }
         return 2_GiB; // Return minimum requirements
     }();
 }
 
 u64 BufferCacheRuntime::GetDeviceMemoryUsage() const {
-    if (GLAD_GL_NVX_gpu_memory_info) {
-        GLint cur_avail_mem_kb = 0;
-        glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &cur_avail_mem_kb);
-        return device_access_memory - static_cast<u64>(cur_avail_mem_kb) * 1_KiB;
+    if (device.CanReportMemoryUsage()) {
+        return device_access_memory - device.GetCurrentDedicatedVideoMemory();
     }
     return 2_GiB;
 }
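
Note: a worked example (illustrative numbers, not from the commit) of the budget arithmetic above, assuming device.GetCurrentDedicatedVideoMemory() reports the VRAM currently available to the process: the budget is fixed at init time as available-plus-512 MiB, and usage is later recovered as budget minus whatever is still available.

    #include <cassert>
    #include <cstdint>

    int main() {
        constexpr std::uint64_t MiB = 1024ull * 1024;
        const std::uint64_t avail_at_init = 6000 * MiB;         // free VRAM at cache startup
        const std::uint64_t budget = avail_at_init + 512 * MiB; // device_access_memory
        const std::uint64_t avail_now = 4500 * MiB;             // free VRAM later at runtime
        const std::uint64_t usage = budget - avail_now;         // GetDeviceMemoryUsage()
        assert(usage == 2012 * MiB);                            // (6000 + 512) - 4500
        return 0;
    }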

src/video_core/renderer_opengl/gl_buffer_cache.h

@@ -89,6 +89,8 @@ public:
     void BindImageBuffer(Buffer& buffer, u32 offset, u32 size,
                          VideoCore::Surface::PixelFormat format);
 
+    u64 GetDeviceMemoryUsage() const;
+
     void BindFastUniformBuffer(size_t stage, u32 binding_index, u32 size) {
         const GLuint handle = fast_uniforms[stage][binding_index].handle;
         const GLsizeiptr gl_size = static_cast<GLsizeiptr>(size);
@@ -155,10 +157,8 @@ public:
         return device_access_memory;
     }
 
-    u64 GetDeviceMemoryUsage() const;
-
     bool CanReportMemoryUsage() const {
-        return GLAD_GL_NVX_gpu_memory_info;
+        return device.CanReportMemoryUsage();
     }
 
 private:

src/video_core/renderer_opengl/gl_device.cpp

@@ -13,12 +13,15 @@
 #include <glad/glad.h>
 
+#include "common/literals.h"
 #include "common/logging/log.h"
 #include "common/settings.h"
 #include "shader_recompiler/stage.h"
 #include "video_core/renderer_opengl/gl_device.h"
 #include "video_core/renderer_opengl/gl_resource_manager.h"
 
+using namespace Common::Literals;
+
 namespace OpenGL {
 namespace {
 constexpr std::array LIMIT_UBOS = {
@@ -165,6 +168,7 @@ Device::Device() {
     has_sparse_texture_2 = GLAD_GL_ARB_sparse_texture2;
     warp_size_potentially_larger_than_guest = !is_nvidia && !is_intel;
     need_fastmath_off = is_nvidia;
+    can_report_memory = GLAD_GL_NVX_gpu_memory_info;
 
     // At the moment of writing this, only Nvidia's driver optimizes BufferSubData on exclusive
     // uniform buffers as "push constants"
@@ -276,4 +280,10 @@ void main() {
})");
}
u64 Device::GetCurrentDedicatedVideoMemory() const {
GLint cur_avail_mem_kb = 0;
glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &cur_avail_mem_kb);
return static_cast<u64>(cur_avail_mem_kb) * 1_KiB;
}
} // namespace OpenGL
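
Note: a standalone sketch (not yuzu code) of the NVX query that Device::GetCurrentDedicatedVideoMemory() wraps, assuming a glad loader generated with the GL_NVX_gpu_memory_info extension; the driver reports these values in KiB, hence the * 1024.

    #include <cstdint>
    #include <glad/glad.h>

    std::uint64_t QueryAvailableVideoMemoryBytes() {
        if (!GLAD_GL_NVX_gpu_memory_info) {
            return 0; // Extension absent (non-NVIDIA driver); caller must fall back.
        }
        GLint avail_kb = 0;
        glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &avail_kb);
        return static_cast<std::uint64_t>(avail_kb) * 1024;
    }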

src/video_core/renderer_opengl/gl_device.h

@@ -20,6 +20,8 @@ public:
     [[nodiscard]] std::string GetVendorName() const;
 
+    u64 GetCurrentDedicatedVideoMemory() const;
+
     u32 GetMaxUniformBuffers(Shader::Stage stage) const noexcept {
         return max_uniform_buffers[static_cast<size_t>(stage)];
     }
@@ -168,6 +170,10 @@ public:
         return vendor_name == "ATI Technologies Inc.";
     }
 
+    bool CanReportMemoryUsage() const {
+        return can_report_memory;
+    }
+
 private:
     static bool TestVariableAoffi();
     static bool TestPreciseBug();
@@ -210,6 +216,7 @@ private:
     bool need_fastmath_off{};
     bool has_cbuf_ftou_bug{};
     bool has_bool_ref_bug{};
+    bool can_report_memory{};
 
     std::string vendor_name;
 };

src/video_core/renderer_opengl/gl_rasterizer.cpp

@@ -352,7 +352,7 @@ void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
     shader_cache.OnCPUWrite(addr, size);
     {
         std::scoped_lock lock{texture_cache.mutex};
-        texture_cache.WriteMemory(addr, size);
+        texture_cache.CachedWriteMemory(addr, size);
     }
     {
         std::scoped_lock lock{buffer_cache.mutex};
@@ -363,6 +363,10 @@ void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
 void RasterizerOpenGL::SyncGuestHost() {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
     shader_cache.SyncGuestHost();
+    {
+        std::scoped_lock lock{texture_cache.mutex};
+        texture_cache.FlushCachedWrites();
+    }
     {
         std::scoped_lock lock{buffer_cache.mutex};
         buffer_cache.FlushCachedWrites();

src/video_core/renderer_opengl/gl_texture_cache.cpp

@@ -485,11 +485,9 @@ TextureCacheRuntime::TextureCacheRuntime(const Device& device_, ProgramManager&
         }
     }
-    device_access_memory = []() -> u64 {
-        if (GLAD_GL_NVX_gpu_memory_info) {
-            GLint cur_avail_mem_kb = 0;
-            glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &cur_avail_mem_kb);
-            return static_cast<u64>(cur_avail_mem_kb) * 1_KiB + 512_MiB;
+    device_access_memory = [this]() -> u64 {
+        if (device.CanReportMemoryUsage()) {
+            return device.GetCurrentDedicatedVideoMemory() + 512_MiB;
         }
         return 2_GiB; // Return minimum requirements
     }();
@@ -510,10 +508,8 @@ ImageBufferMap TextureCacheRuntime::DownloadStagingBuffer(size_t size) {
 }
 
 u64 TextureCacheRuntime::GetDeviceMemoryUsage() const {
-    if (GLAD_GL_NVX_gpu_memory_info) {
-        GLint cur_avail_mem_kb = 0;
-        glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &cur_avail_mem_kb);
-        return device_access_memory - static_cast<u64>(cur_avail_mem_kb) * 1_KiB;
+    if (device.CanReportMemoryUsage()) {
+        return device_access_memory - device.GetCurrentDedicatedVideoMemory();
     }
     return 2_GiB;
 }

src/video_core/renderer_opengl/gl_texture_cache.h

@@ -10,6 +10,7 @@
 #include <glad/glad.h>
 
 #include "shader_recompiler/shader_info.h"
+#include "video_core/renderer_opengl/gl_device.h"
 #include "video_core/renderer_opengl/gl_resource_manager.h"
 #include "video_core/renderer_opengl/util_shaders.h"
 #include "video_core/texture_cache/image_view_base.h"
@@ -21,7 +22,6 @@ struct ResolutionScalingInfo;
 namespace OpenGL {
 
-class Device;
 class ProgramManager;
 class StateTracker;
@@ -90,7 +90,7 @@ public:
     u64 GetDeviceMemoryUsage() const;
 
     bool CanReportMemoryUsage() const {
-        return GLAD_GL_NVX_gpu_memory_info;
+        return device.CanReportMemoryUsage();
     }
 
     bool ShouldReinterpret([[maybe_unused]] Image& dst, [[maybe_unused]] Image& src) {

src/video_core/renderer_vulkan/vk_rasterizer.cpp

@@ -408,7 +408,7 @@ void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) {
     pipeline_cache.OnCPUWrite(addr, size);
     {
         std::scoped_lock lock{texture_cache.mutex};
-        texture_cache.WriteMemory(addr, size);
+        texture_cache.CachedWriteMemory(addr, size);
     }
     {
         std::scoped_lock lock{buffer_cache.mutex};
@@ -418,6 +418,10 @@ void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) {
 void RasterizerVulkan::SyncGuestHost() {
     pipeline_cache.SyncGuestHost();
+    {
+        std::scoped_lock lock{texture_cache.mutex};
+        texture_cache.FlushCachedWrites();
+    }
     {
         std::scoped_lock lock{buffer_cache.mutex};
         buffer_cache.FlushCachedWrites();

src/video_core/texture_cache/image_base.h

@@ -39,6 +39,9 @@ enum class ImageFlagBits : u32 {
     Rescaled = 1 << 13,
     CheckingRescalable = 1 << 14,
     IsRescalable = 1 << 15,
+
+    // Cached CPU
+    CachedCpuModified = 1 << 16, ///< Contents have been modified from the CPU
 };
 DECLARE_ENUM_FLAG_OPERATORS(ImageFlagBits)

src/video_core/texture_cache/texture_cache.h

@@ -437,6 +437,23 @@ void TextureCache<P>::WriteMemory(VAddr cpu_addr, size_t size) {
     });
 }
 
+template <class P>
+void TextureCache<P>::CachedWriteMemory(VAddr cpu_addr, size_t size) {
+    const VAddr new_cpu_addr = Common::AlignDown(cpu_addr, CPU_PAGE_SIZE);
+    const size_t new_size = Common::AlignUp(size + cpu_addr - new_cpu_addr, CPU_PAGE_SIZE);
+    ForEachImageInRegion(new_cpu_addr, new_size, [this](ImageId image_id, Image& image) {
+        if (True(image.flags & ImageFlagBits::CachedCpuModified)) {
+            return;
+        }
+        image.flags |= ImageFlagBits::CachedCpuModified;
+        cached_cpu_invalidate.insert(image_id);
+
+        if (True(image.flags & ImageFlagBits::Tracked)) {
+            UntrackImage(image, image_id);
+        }
+    });
+}
+
 template <class P>
 void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) {
     std::vector<ImageId> images;
@@ -494,6 +511,18 @@ void TextureCache<P>::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) {
     }
 }
 
+template <class P>
+void TextureCache<P>::FlushCachedWrites() {
+    for (ImageId image_id : cached_cpu_invalidate) {
+        Image& image = slot_images[image_id];
+        if (True(image.flags & ImageFlagBits::CachedCpuModified)) {
+            image.flags &= ~ImageFlagBits::CachedCpuModified;
+            image.flags |= ImageFlagBits::CpuModified;
+        }
+    }
+    cached_cpu_invalidate.clear();
+}
+
 template <class P>
 void TextureCache<P>::BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
                                 const Tegra::Engines::Fermi2D::Surface& src,
@@ -1560,6 +1589,9 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
 template <class P>
 void TextureCache<P>::TrackImage(ImageBase& image, ImageId image_id) {
     ASSERT(False(image.flags & ImageFlagBits::Tracked));
+    if (True(image.flags & ImageFlagBits::CachedCpuModified)) {
+        return;
+    }
     image.flags |= ImageFlagBits::Tracked;
     if (False(image.flags & ImageFlagBits::Sparse)) {
         rasterizer.UpdatePagesCachedCount(image.cpu_addr, image.guest_size_bytes, 1);
@@ -1616,6 +1648,9 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
         tentative_size = EstimatedDecompressedSize(tentative_size, image.info.format);
     }
     total_used_memory -= Common::AlignUp(tentative_size, 1024);
+    if (True(image.flags & ImageFlagBits::CachedCpuModified)) {
+        cached_cpu_invalidate.erase(image_id);
+    }
     const GPUVAddr gpu_addr = image.gpu_addr;
     const auto alloc_it = image_allocs_table.find(gpu_addr);
     if (alloc_it == image_allocs_table.end()) {
@@ -1782,7 +1817,11 @@ template <class P>
 void TextureCache<P>::PrepareImage(ImageId image_id, bool is_modification, bool invalidate) {
     Image& image = slot_images[image_id];
     if (invalidate) {
-        image.flags &= ~(ImageFlagBits::CpuModified | ImageFlagBits::GpuModified);
+        if (True(image.flags & ImageFlagBits::CachedCpuModified)) {
+            cached_cpu_invalidate.erase(image_id);
+        }
+        image.flags &= ~(ImageFlagBits::CpuModified | ImageFlagBits::GpuModified |
+                         ImageFlagBits::CachedCpuModified);
         if (False(image.flags & ImageFlagBits::Tracked)) {
             TrackImage(image, image_id);
         }
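
Note: a minimal sketch of the deferred-invalidation flow added above (simplified types, not the real TextureCache interface): CachedWriteMemory() only tags touched images and queues them, and FlushCachedWrites(), called from SyncGuestHost(), later promotes the tag to CpuModified, so a burst of guest writes costs one flush instead of one full invalidation each.

    #include <unordered_set>

    struct Image {
        unsigned flags = 0;
    };
    enum : unsigned { CpuModified = 1 << 0, CachedCpuModified = 1 << 16 };

    struct CacheSketch {
        std::unordered_set<Image*> pending;

        void CachedWrite(Image& image) {
            if (image.flags & CachedCpuModified) {
                return; // Already queued for this flush window.
            }
            image.flags |= CachedCpuModified;
            pending.insert(&image);
        }

        void Flush() {
            for (Image* image : pending) {
                image->flags &= ~CachedCpuModified;
                image->flags |= CpuModified; // Re-uploaded from guest memory on next use.
            }
            pending.clear();
        }
    };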

View File

@@ -8,6 +8,7 @@
 #include <span>
 #include <type_traits>
 #include <unordered_map>
+#include <unordered_set>
 #include <vector>
 #include <queue>
@@ -50,6 +51,9 @@ class TextureCache {
     /// Address shift for caching images into a hash table
     static constexpr u64 PAGE_BITS = 20;
+    static constexpr u64 CPU_PAGE_BITS = 12;
+    static constexpr u64 CPU_PAGE_SIZE = 1ULL << CPU_PAGE_BITS;
 
     /// Enables debugging features to the texture cache
     static constexpr bool ENABLE_VALIDATION = P::ENABLE_VALIDATION;
     /// Implement blits as copies between framebuffers
@@ -136,6 +140,9 @@ public:
     /// Mark images in a range as modified from the CPU
     void WriteMemory(VAddr cpu_addr, size_t size);
 
+    /// Mark images in a range as modified from the CPU
+    void CachedWriteMemory(VAddr cpu_addr, size_t size);
+
     /// Download contents of host images to guest memory in a region
     void DownloadMemory(VAddr cpu_addr, size_t size);
@@ -145,6 +152,8 @@ public:
     /// Remove images in a region
     void UnmapGPUMemory(GPUVAddr gpu_addr, size_t size);
 
+    void FlushCachedWrites();
+
     /// Blit an image with the given parameters
     void BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
                    const Tegra::Engines::Fermi2D::Surface& src,
@@ -366,6 +375,8 @@ private:
     std::unordered_map<ImageId, std::vector<ImageViewId>> sparse_views;
 
+    std::unordered_set<ImageId> cached_cpu_invalidate;
+
     VAddr virtual_invalid_space{};
 
     bool has_deleted_images = false;

src/video_core/vulkan_common/vulkan_device.cpp

@@ -1324,7 +1324,6 @@ void Device::CollectPhysicalMemoryInfo() {
     const s64 available_memory = static_cast<s64>(device_access_memory - device_initial_usage);
     device_access_memory = static_cast<u64>(std::max<s64>(
         std::min<s64>(available_memory - 8_GiB, 4_GiB), static_cast<s64>(local_memory)));
-    device_initial_usage = 0;
 }
 
 void Device::CollectToolingInfo() {