Rebase of "Handle munmap over multiple VMAs" (#2233)

* Unmap memory in chunks when the requested range spans multiple VMAs (see the sketch ahead of the diff below)

* clang

* Merge fixups

* Minor code style changes

* Update function declarations

---------

Co-authored-by: Marcin Mikołajczyk <marcinmikolajcz@gmail.com>
Author: kalaposfos13
Date: 2025-01-24 14:30:55 +01:00 (committed by GitHub)
Commit: 4f426b723f
Parent: 4d12de8149

2 changed files with 35 additions and 18 deletions
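The new UnmapMemoryImpl below walks the requested range one VMA entry at a time, asking a helper to unmap at most one entry's worth of bytes and advancing by whatever it reports. Here is a minimal, self-contained sketch of that loop under simplified assumptions; Vma, VmaMap, find_vma, unmap_from_entry, and unmap are illustrative stand-ins, not the emulator's actual types or functions.

// A minimal, self-contained model of the chunked-unmap loop (simplified
// stand-in types; not the emulator's real MemoryManager interfaces).
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>

struct Vma {
    uint64_t base = 0;
    uint64_t size = 0;
};

// Ordered by base address, so upper_bound(addr) - 1 is the VMA containing addr.
using VmaMap = std::map<uint64_t, Vma>;

const Vma& find_vma(const VmaMap& map, uint64_t addr) {
    auto it = map.upper_bound(addr);
    assert(it != map.begin() && "address below the first mapping");
    return std::prev(it)->second;
}

// Handle at most one VMA's worth of the request; report how many bytes were covered.
uint64_t unmap_from_entry(const Vma& vma, uint64_t addr, uint64_t size) {
    const uint64_t start_in_vma = addr - vma.base;
    const uint64_t chunk = std::min(vma.size - start_in_vma, size);
    // The real implementation releases backing memory / notifies the GPU here.
    return chunk;
}

// Walk the request VMA by VMA until every byte has been accounted for.
void unmap(const VmaMap& map, uint64_t addr, uint64_t size) {
    uint64_t done = 0;
    do {
        const Vma& vma = find_vma(map, addr + done);
        const uint64_t chunk = unmap_from_entry(vma, addr + done, size - done);
        assert(chunk > 0 && "no forward progress possible");
        done += chunk;
    } while (done < size);
}

int main() {
    // Two adjacent 4 KiB mappings; one unmap request covers both of them.
    VmaMap map{{0x10000, {0x10000, 0x1000}}, {0x11000, {0x11000, 0x1000}}};
    unmap(map, 0x10000, 0x2000);
    return 0;
}

Having each entry report the number of bytes it handled is what lets the caller both advance through the range and assert that progress is always made, mirroring the ASSERT_MSG guard in the real loop.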

@@ -389,32 +389,29 @@ s32 MemoryManager::UnmapMemory(VAddr virtual_addr, size_t size) {
     return UnmapMemoryImpl(virtual_addr, size);
 }
 
-s32 MemoryManager::UnmapMemoryImpl(VAddr virtual_addr, size_t size) {
-    const auto it = FindVMA(virtual_addr);
-    const auto& vma_base = it->second;
-    ASSERT_MSG(vma_base.Contains(virtual_addr, size),
-               "Existing mapping does not contain requested unmap range");
-    const auto type = vma_base.type;
-    if (type == VMAType::Free) {
-        return ORBIS_OK;
-    }
+u64 MemoryManager::UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma_base, u64 size) {
     const auto vma_base_addr = vma_base.base;
     const auto vma_base_size = vma_base.size;
+    const auto type = vma_base.type;
     const auto phys_base = vma_base.phys_base;
     const bool is_exec = vma_base.is_exec;
     const auto start_in_vma = virtual_addr - vma_base_addr;
+    const auto adjusted_size =
+        vma_base_size - start_in_vma < size ? vma_base_size - start_in_vma : size;
     const bool has_backing = type == VMAType::Direct || type == VMAType::File;
+    if (type == VMAType::Free) {
+        return adjusted_size;
+    }
     if (type == VMAType::Direct || type == VMAType::Pooled) {
-        rasterizer->UnmapMemory(virtual_addr, size);
+        rasterizer->UnmapMemory(virtual_addr, adjusted_size);
     }
     if (type == VMAType::Flexible) {
-        flexible_usage -= size;
+        flexible_usage -= adjusted_size;
     }
 
     // Mark region as free and attempt to coalesce it with neighbours.
-    const auto new_it = CarveVMA(virtual_addr, size);
+    const auto new_it = CarveVMA(virtual_addr, adjusted_size);
     auto& vma = new_it->second;
     vma.type = VMAType::Free;
     vma.prot = MemoryProt::NoAccess;
@@ -423,13 +420,25 @@ s32 MemoryManager::UnmapMemoryImpl(VAddr virtual_addr, size_t size) {
     vma.name = "";
     MergeAdjacent(vma_map, new_it);
     bool readonly_file = vma.prot == MemoryProt::CpuRead && type == VMAType::File;
 
     if (type != VMAType::Reserved && type != VMAType::PoolReserved) {
         // Unmap the memory region.
-        impl.Unmap(vma_base_addr, vma_base_size, start_in_vma, start_in_vma + size, phys_base,
-                   is_exec, has_backing, readonly_file);
+        impl.Unmap(vma_base_addr, vma_base_size, start_in_vma, start_in_vma + adjusted_size,
+                   phys_base, is_exec, has_backing, readonly_file);
         TRACK_FREE(virtual_addr, "VMEM");
     }
+    return adjusted_size;
+}
+
+s32 MemoryManager::UnmapMemoryImpl(VAddr virtual_addr, u64 size) {
+    u64 unmapped_bytes = 0;
+    do {
+        auto it = FindVMA(virtual_addr + unmapped_bytes);
+        auto& vma_base = it->second;
+        auto unmapped =
+            UnmapBytesFromEntry(virtual_addr + unmapped_bytes, vma_base, size - unmapped_bytes);
+        ASSERT_MSG(unmapped > 0, "Failed to unmap memory, progress is impossible");
+        unmapped_bytes += unmapped;
+    } while (unmapped_bytes < size);
     return ORBIS_OK;
 }
@@ -651,6 +660,12 @@ MemoryManager::VMAHandle MemoryManager::CarveVMA(VAddr virtual_addr, size_t size
     const VAddr start_in_vma = virtual_addr - vma.base;
     const VAddr end_in_vma = start_in_vma + size;
 
+    if (start_in_vma == 0 && size == vma.size) {
+        // if requesting the whole VMA, return it
+        return vma_handle;
+    }
+
     ASSERT_MSG(end_in_vma <= vma.size, "Mapping cannot fit inside free region");
     if (end_in_vma != vma.size) {

@@ -252,7 +252,9 @@ private:
     DMemHandle Split(DMemHandle dmem_handle, size_t offset_in_area);
 
-    s32 UnmapMemoryImpl(VAddr virtual_addr, size_t size);
+    u64 UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma_base, u64 size);
+    s32 UnmapMemoryImpl(VAddr virtual_addr, u64 size);
 
 private:
     AddressSpace impl;