Mirror of https://github.com/shadps4-emu/shadPS4.git (synced 2024-12-28 02:26:07 +00:00)
cache: Invalidate pages for file reads. (#1726)
* cache: Invalidate pages for file reads.
* texture_cache: Simplify invalidate intersection check.
* vk_rasterizer: Make aware of mapped memory ranges.
* buffer_cache: Remove redundant page calculations. Called functions will convert to page numbers/addresses themselves.
* file_system: Simplify memory invalidation and add a few missed cases.
This commit is contained in:
Parent: e612e881ac
Commit: 14f7dc3527
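To orient the diff below: a file read now invalidates any GPU-mapped pages that overlap the destination buffer before the data is copied in, clamped to the bytes the read can actually return. The following is a minimal, self-contained sketch of that flow; GpuCaches, ReadIntoGuest and the plain range list are illustrative stand-ins, not the emulator's actual types.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <iostream>
#include <utility>
#include <vector>

using VAddr = std::uintptr_t;

// Illustrative stand-in for the rasterizer's buffer/texture caches.
struct GpuCaches {
    std::vector<std::pair<VAddr, VAddr>> mapped_ranges; // half-open [begin, end)

    bool IsMapped(VAddr addr, std::uint64_t size) const {
        if (size == 0) {
            return false;
        }
        for (const auto& [begin, end] : mapped_ranges) {
            if (addr < end && begin < addr + size) {
                return true;
            }
        }
        return false;
    }

    // Returns true only when the destination overlapped GPU-mapped memory.
    bool InvalidateMemory(VAddr addr, std::uint64_t size) {
        if (!IsMapped(addr, size)) {
            return false; // not GPU memory, skip the cache work entirely
        }
        std::cout << "invalidating " << size << " bytes at 0x" << std::hex << addr << std::dec
                  << '\n';
        return true;
    }
};

// Shape of the new read path: invalidate the destination range first, then read into it.
std::size_t ReadIntoGuest(GpuCaches& gpu, std::FILE* file, void* buf, std::size_t nbytes,
                          std::uint64_t remaining_in_file) {
    gpu.InvalidateMemory(reinterpret_cast<VAddr>(buf),
                         std::min<std::uint64_t>(nbytes, remaining_in_file));
    return std::fread(buf, 1, nbytes, file);
}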
@@ -1,22 +1,22 @@
 // SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
+#include <map>
+#include <ranges>
+
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/scope_exit.h"
 #include "common/singleton.h"
+#include "core/devices/logger.h"
+#include "core/devices/nop_device.h"
 #include "core/file_sys/fs.h"
 #include "core/libraries/kernel/file_system.h"
 #include "core/libraries/kernel/orbis_error.h"
 #include "core/libraries/libs.h"
+#include "core/memory.h"
 #include "kernel.h"
 
-#include <map>
-#include <ranges>
-
-#include "core/devices/logger.h"
-#include "core/devices/nop_device.h"
-
 namespace D = Core::Devices;
 using FactoryDevice = std::function<std::shared_ptr<D::BaseDevice>(u32, const char*, int, u16)>;
 
@@ -201,7 +201,7 @@ int PS4_SYSV_ABI posix_close(int d) {
     return result;
 }
 
-size_t PS4_SYSV_ABI sceKernelWrite(int d, const void* buf, size_t nbytes) {
+s64 PS4_SYSV_ABI sceKernelWrite(int d, const void* buf, size_t nbytes) {
     auto* h = Common::Singleton<Core::FileSys::HandleTable>::Instance();
     auto* file = h->GetFile(d);
     if (file == nullptr) {
@@ -246,6 +246,15 @@ int PS4_SYSV_ABI sceKernelUnlink(const char* path) {
     return ORBIS_OK;
 }
 
+size_t ReadFile(Common::FS::IOFile& file, void* buf, size_t nbytes) {
+    const auto* memory = Core::Memory::Instance();
+    // Invalidate up to the actual number of bytes that could be read.
+    const auto remaining = file.GetSize() - file.Tell();
+    memory->InvalidateMemory(reinterpret_cast<VAddr>(buf), std::min<u64>(nbytes, remaining));
+
+    return file.ReadRaw<u8>(buf, nbytes);
+}
+
 size_t PS4_SYSV_ABI _readv(int d, const SceKernelIovec* iov, int iovcnt) {
     auto* h = Common::Singleton<Core::FileSys::HandleTable>::Instance();
     auto* file = h->GetFile(d);
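The std::min clamp in ReadFile keeps the invalidated span honest near end of file: a caller may ask for more bytes than remain, and only the bytes the read can actually produce need to be flushed from the GPU caches. A small arithmetic sketch with made-up values:

#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
    const std::uint64_t nbytes = 64 * 1024; // caller asks for 64 KiB
    const std::uint64_t file_size = 100;    // but only 100 bytes exist
    const std::uint64_t offset = 40;        // and 40 of them are already consumed
    const std::uint64_t remaining = file_size - offset;
    // Only the bytes the read can actually deliver are invalidated.
    std::cout << "invalidate " << std::min(nbytes, remaining) << " bytes\n"; // prints 60
}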
@@ -264,7 +273,7 @@ size_t PS4_SYSV_ABI _readv(int d, const SceKernelIovec* iov, int iovcnt) {
     }
     size_t total_read = 0;
     for (int i = 0; i < iovcnt; i++) {
-        total_read += file->f.ReadRaw<u8>(iov[i].iov_base, iov[i].iov_len);
+        total_read += ReadFile(file->f, iov[i].iov_base, iov[i].iov_len);
     }
     return total_read;
 }
@@ -351,7 +360,7 @@ s64 PS4_SYSV_ABI sceKernelRead(int d, void* buf, size_t nbytes) {
     if (file->type == Core::FileSys::FileType::Device) {
         return file->device->read(buf, nbytes);
     }
-    return file->f.ReadRaw<u8>(buf, nbytes);
+    return ReadFile(file->f, buf, nbytes);
 }
 
 int PS4_SYSV_ABI posix_read(int d, void* buf, size_t nbytes) {
@@ -541,7 +550,7 @@ s64 PS4_SYSV_ABI sceKernelPreadv(int d, SceKernelIovec* iov, int iovcnt, s64 off
     }
     size_t total_read = 0;
     for (int i = 0; i < iovcnt; i++) {
-        total_read += file->f.ReadRaw<u8>(iov[i].iov_base, iov[i].iov_len);
+        total_read += ReadFile(file->f, iov[i].iov_base, iov[i].iov_len);
     }
     return total_read;
 }
@@ -65,6 +65,9 @@ constexpr int ORBIS_KERNEL_O_DSYNC = 0x1000;
 constexpr int ORBIS_KERNEL_O_DIRECT = 0x00010000;
 constexpr int ORBIS_KERNEL_O_DIRECTORY = 0x00020000;
 
+s64 PS4_SYSV_ABI sceKernelWrite(int d, const void* buf, size_t nbytes);
+s64 PS4_SYSV_ABI sceKernelRead(int d, void* buf, size_t nbytes);
+
 void RegisterFileSystem(Core::Loader::SymbolsResolver* sym);
 
 } // namespace Libraries::Kernel
@@ -133,33 +133,11 @@ void PS4_SYSV_ABI sceLibcHeapGetTraceInfo(HeapInfoInfo* info) {
 }
 
 s64 PS4_SYSV_ABI ps4__write(int d, const char* buf, std::size_t nbytes) {
-    auto* h = Common::Singleton<Core::FileSys::HandleTable>::Instance();
-    auto* file = h->GetFile(d);
-    if (file == nullptr) {
-        return ORBIS_KERNEL_ERROR_EBADF;
-    }
-    std::scoped_lock lk{file->m_mutex};
-    if (file->type == Core::FileSys::FileType::Device) {
-        return file->device->write(buf, nbytes);
-    }
-    return file->f.WriteRaw<u8>(buf, nbytes);
+    return sceKernelWrite(d, buf, nbytes);
 }
 
 s64 PS4_SYSV_ABI ps4__read(int d, void* buf, u64 nbytes) {
-    if (d == 0) {
-        return static_cast<s64>(
-            strlen(std::fgets(static_cast<char*>(buf), static_cast<int>(nbytes), stdin)));
-    }
-    auto* h = Common::Singleton<Core::FileSys::HandleTable>::Instance();
-    auto* file = h->GetFile(d);
-    if (file == nullptr) {
-        return ORBIS_KERNEL_ERROR_EBADF;
-    }
-    std::scoped_lock lk{file->m_mutex};
-    if (file->type == Core::FileSys::FileType::Device) {
-        return file->device->read(buf, nbytes);
-    }
-    return file->f.ReadRaw<u8>(buf, nbytes);
+    return sceKernelRead(d, buf, nbytes);
 }
 
 struct OrbisKernelUuid {
@@ -587,6 +587,12 @@ void MemoryManager::NameVirtualRange(VAddr virtual_addr, size_t size, std::strin
     it->second.name = name;
 }
 
+void MemoryManager::InvalidateMemory(const VAddr addr, const u64 size) const {
+    if (rasterizer) {
+        rasterizer->InvalidateMemory(addr, size);
+    }
+}
+
 VAddr MemoryManager::SearchFree(VAddr virtual_addr, size_t size, u32 alignment) {
     // If the requested address is below the mapped range, start search from the lowest address
     auto min_search_address = impl.SystemManagedVirtualBase();
@@ -211,6 +211,8 @@ public:
 
     void NameVirtualRange(VAddr virtual_addr, size_t size, std::string_view name);
 
+    void InvalidateMemory(VAddr addr, u64 size) const;
+
 private:
     VMAHandle FindVMA(VAddr target) {
         return std::prev(vma_map.upper_bound(target));
@@ -114,8 +114,7 @@ struct PageManager::Impl {
 
             // Notify rasterizer about the fault.
             const VAddr addr = msg.arg.pagefault.address;
-            const VAddr addr_page = GetPageAddr(addr);
-            rasterizer->InvalidateMemory(addr, addr_page, PAGESIZE);
+            rasterizer->InvalidateMemory(addr, 1);
         }
     }
 
@@ -135,17 +134,14 @@ struct PageManager::Impl {
     }
 
     void OnMap(VAddr address, size_t size) {
-        owned_ranges += boost::icl::interval<VAddr>::right_open(address, address + size);
+        // No-op
     }
 
     void OnUnmap(VAddr address, size_t size) {
-        owned_ranges -= boost::icl::interval<VAddr>::right_open(address, address + size);
+        // No-op
     }
 
     void Protect(VAddr address, size_t size, bool allow_write) {
-        ASSERT_MSG(owned_ranges.find(address) != owned_ranges.end(),
-                   "Attempted to track non-GPU memory at address {:#x}, size {:#x}.", address,
-                   size);
         auto* memory = Core::Memory::Instance();
         auto& impl = memory->GetAddressSpace();
         impl.Protect(address, size,
@@ -155,17 +151,13 @@ struct PageManager::Impl {
 
     static bool GuestFaultSignalHandler(void* context, void* fault_address) {
         const auto addr = reinterpret_cast<VAddr>(fault_address);
-        const bool is_write = Common::IsWriteError(context);
-        if (is_write && owned_ranges.find(addr) != owned_ranges.end()) {
-            const VAddr addr_aligned = GetPageAddr(addr);
-            rasterizer->InvalidateMemory(addr, addr_aligned, PAGESIZE);
-            return true;
+        if (Common::IsWriteError(context)) {
+            return rasterizer->InvalidateMemory(addr, 1);
         }
         return false;
     }
 
     inline static Vulkan::Rasterizer* rasterizer;
-    inline static boost::icl::interval_set<VAddr> owned_ranges;
 };
 #endif
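Passing a size of 1 to InvalidateMemory works because the invalidation path rounds the range out to page boundaries before acting on it, so a one-byte range at the faulting address still covers the whole faulting page. A sketch of that rounding, assuming 4 KiB pages (the real constants and helpers live in PageManager, so treat these as illustrative):

#include <cstdint>
#include <iostream>

using VAddr = std::uint64_t;

constexpr std::uint64_t kPageSize = 4096; // assumed page size, for illustration only

constexpr VAddr GetPageAddr(VAddr addr) {
    return addr & ~(kPageSize - 1); // round down to the start of the page
}

constexpr VAddr GetNextPageAddr(VAddr addr) {
    return GetPageAddr(addr) + kPageSize; // start of the page after the one containing addr
}

int main() {
    const VAddr fault = 0x12345678;
    const std::uint64_t size = 1; // "invalidate one byte at the fault address"
    const VAddr start = GetPageAddr(fault);
    const VAddr end = GetNextPageAddr(fault + size - 1);
    // Prints [12345000, 12346000): the whole 4 KiB page around the fault.
    std::cout << std::hex << "[" << start << ", " << end << ")\n";
}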
@@ -210,6 +202,9 @@ void PageManager::UpdatePagesCachedCount(VAddr addr, u64 size, s32 delta) {
         const VAddr interval_start_addr = boost::icl::first(interval) << PageShift;
         const VAddr interval_end_addr = boost::icl::last_next(interval) << PageShift;
         const u32 interval_size = interval_end_addr - interval_start_addr;
+        ASSERT_MSG(rasterizer->IsMapped(interval_start_addr, interval_size),
+                   "Attempted to track non-GPU memory at address {:#x}, size {:#x}.",
+                   interval_start_addr, interval_size);
         if (delta > 0 && count == delta) {
             impl->Protect(interval_start_addr, interval_size, false);
         } else if (delta < 0 && count == -delta) {
@@ -841,12 +841,27 @@ u32 Rasterizer::ReadDataFromGds(u32 gds_offset) {
     return value;
 }
 
-void Rasterizer::InvalidateMemory(VAddr addr, VAddr addr_aligned, u64 size) {
-    buffer_cache.InvalidateMemory(addr_aligned, size);
-    texture_cache.InvalidateMemory(addr, addr_aligned, size);
+bool Rasterizer::InvalidateMemory(VAddr addr, u64 size) {
+    if (!IsMapped(addr, size)) {
+        // Not GPU mapped memory, can skip invalidation logic entirely.
+        return false;
+    }
+    buffer_cache.InvalidateMemory(addr, size);
+    texture_cache.InvalidateMemory(addr, size);
+    return true;
 }
 
+bool Rasterizer::IsMapped(VAddr addr, u64 size) {
+    if (size == 0) {
+        // There is no memory, so not mapped.
+        return false;
+    }
+    return mapped_ranges.find(boost::icl::interval<VAddr>::right_open(addr, addr + size)) !=
+           mapped_ranges.end();
+}
+
 void Rasterizer::MapMemory(VAddr addr, u64 size) {
+    mapped_ranges += boost::icl::interval<VAddr>::right_open(addr, addr + size);
     page_manager.OnGpuMap(addr, size);
 }
 
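The new mapped_ranges bookkeeping leans on boost::icl::interval_set: += and -= merge and split intervals automatically, and find() with a right-open interval returns a non-end() iterator whenever the query overlaps something in the set, which is exactly the IsMapped test above. A standalone sketch of that behaviour (addresses are arbitrary):

#include <boost/icl/interval_set.hpp>
#include <cstdint>
#include <iostream>

using VAddr = std::uint64_t;

int main() {
    boost::icl::interval_set<VAddr> mapped_ranges;
    mapped_ranges += boost::icl::interval<VAddr>::right_open(0x1000, 0x3000);
    mapped_ranges += boost::icl::interval<VAddr>::right_open(0x3000, 0x5000); // joins with the above

    const auto is_mapped = [&](VAddr addr, std::uint64_t size) {
        if (size == 0) {
            return false;
        }
        return mapped_ranges.find(boost::icl::interval<VAddr>::right_open(addr, addr + size)) !=
               mapped_ranges.end();
    };

    std::cout << std::boolalpha;
    std::cout << is_mapped(0x2000, 0x10) << '\n'; // true: inside the merged range
    std::cout << is_mapped(0x8000, 0x10) << '\n'; // false: no overlap

    mapped_ranges -= boost::icl::interval<VAddr>::right_open(0x1000, 0x5000); // unmap everything
    std::cout << is_mapped(0x2000, 0x10) << '\n'; // false after unmapping
}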
@@ -854,6 +869,7 @@ void Rasterizer::UnmapMemory(VAddr addr, u64 size) {
     buffer_cache.InvalidateMemory(addr, size);
     texture_cache.UnmapMemory(addr, size);
     page_manager.OnGpuUnmap(addr, size);
+    mapped_ranges -= boost::icl::interval<VAddr>::right_open(addr, addr + size);
 }
 
 void Rasterizer::UpdateDynamicState(const GraphicsPipeline& pipeline) {
@@ -54,7 +54,8 @@ public:
 
     void InlineData(VAddr address, const void* value, u32 num_bytes, bool is_gds);
     u32 ReadDataFromGds(u32 gsd_offset);
-    void InvalidateMemory(VAddr addr, VAddr addr_aligned, u64 size);
+    bool InvalidateMemory(VAddr addr, u64 size);
+    bool IsMapped(VAddr addr, u64 size);
     void MapMemory(VAddr addr, u64 size);
     void UnmapMemory(VAddr addr, u64 size);
 
@@ -100,6 +101,7 @@ private:
     VideoCore::TextureCache texture_cache;
     AmdGpu::Liverpool* liverpool;
     Core::MemoryManager* memory;
+    boost::icl::interval_set<VAddr> mapped_ranges;
     PipelineCache pipeline_cache;
 
     boost::container::static_vector<
@@ -56,24 +56,27 @@ void TextureCache::MarkAsMaybeDirty(ImageId image_id, Image& image) {
     UntrackImage(image_id);
 }
 
-void TextureCache::InvalidateMemory(VAddr addr, VAddr page_addr, size_t size) {
+void TextureCache::InvalidateMemory(VAddr addr, size_t size) {
     std::scoped_lock lock{mutex};
-    ForEachImageInRegion(page_addr, size, [&](ImageId image_id, Image& image) {
+    const auto end = addr + size;
+    const auto pages_start = PageManager::GetPageAddr(addr);
+    const auto pages_end = PageManager::GetNextPageAddr(addr + size - 1);
+    ForEachImageInRegion(pages_start, pages_end - pages_start, [&](ImageId image_id, Image& image) {
         const auto image_begin = image.info.guest_address;
         const auto image_end = image.info.guest_address + image.info.guest_size_bytes;
-        const auto page_end = page_addr + size;
-        if (image_begin <= addr && addr < image_end) {
-            // This image was definitely accessed by this page fault.
-            // Untrack image, so the range is unprotected and the guest can write freely
+        if (image_begin < end && addr < image_end) {
+            // Start or end of the modified region is in the image, or the image is entirely within
+            // the modified region, so the image was definitely accessed by this page fault.
+            // Untrack the image, so that the range is unprotected and the guest can write freely.
            image.flags |= ImageFlagBits::CpuDirty;
            UntrackImage(image_id);
-        } else if (page_end < image_end) {
+        } else if (pages_end < image_end) {
            // This page access may or may not modify the image.
            // We should not mark it as dirty now. If it really was modified
            // it will receive more invalidations on its other pages.
            // Remove tracking from this page only.
            UntrackImageHead(image_id);
-        } else if (image_begin < page_addr) {
+        } else if (image_begin < pages_start) {
            // This page access does not modify the image but the page should be untracked.
            // We should not mark this image as dirty now. If it really was modified
            // it will receive more invalidations on its other pages.
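The simplified check above is the standard half-open overlap test: an image [image_begin, image_end) intersects the modified region [addr, addr + size) exactly when image_begin < addr + size && addr < image_end, which also covers the case where the image lies entirely inside the region. A tiny sketch with made-up addresses:

#include <cstdint>
#include <iostream>

using VAddr = std::uint64_t;

// True when [image_begin, image_end) and [addr, addr + size) overlap.
bool Intersects(VAddr image_begin, VAddr image_end, VAddr addr, std::uint64_t size) {
    const VAddr end = addr + size;
    return image_begin < end && addr < image_end;
}

int main() {
    std::cout << std::boolalpha;
    // Image occupies [0x2000, 0x4000).
    std::cout << Intersects(0x2000, 0x4000, 0x1000, 0x800) << '\n';  // false: region ends at 0x1800
    std::cout << Intersects(0x2000, 0x4000, 0x3f00, 0x200) << '\n';  // true: overlaps the image tail
    std::cout << Intersects(0x2000, 0x4000, 0x1000, 0x5000) << '\n'; // true: image fully inside region
}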
@@ -95,7 +95,7 @@ public:
     ~TextureCache();
 
     /// Invalidates any image in the logical page range.
-    void InvalidateMemory(VAddr addr, VAddr page_addr, size_t size);
+    void InvalidateMemory(VAddr addr, size_t size);
 
     /// Marks an image as dirty if it exists at the provided address.
    void InvalidateMemoryFromGPU(VAddr address, size_t max_size);