diff --git a/src/core/address_space.cpp b/src/core/address_space.cpp
index 23511370..0dd7a76f 100644
--- a/src/core/address_space.cpp
+++ b/src/core/address_space.cpp
@@ -15,6 +15,7 @@
 #include
 #include
 #endif
+#include "libraries/error_codes.h"
 
 #ifdef __APPLE__
 // Reserve space for the system address space using a zerofill section.
@@ -231,27 +232,36 @@ struct AddressSpace::Impl {
 
     void Protect(VAddr virtual_addr, size_t size, bool read, bool write, bool execute) {
         DWORD new_flags{};
-        if (read && write) {
+
+        if (read && write && execute) {
+            new_flags = PAGE_EXECUTE_READWRITE;
+        } else if (read && write) {
             new_flags = PAGE_READWRITE;
         } else if (read && !write) {
             new_flags = PAGE_READONLY;
-        } else if (!read && !write) {
+        } else if (execute && !read && !write) {
+            new_flags = PAGE_EXECUTE;
+        } else if (!read && !write && !execute) {
             new_flags = PAGE_NOACCESS;
         } else {
-            UNIMPLEMENTED_MSG("Protection flag combination read={} write={}", read, write);
+            LOG_CRITICAL(Common_Memory,
+                         "Unsupported protection flag combination for address {:#x}, size {}",
+                         virtual_addr, size);
+            return;
         }
 
-        const VAddr virtual_end = virtual_addr + size;
-        auto [it, end] = placeholders.equal_range({virtual_addr, virtual_end});
-        while (it != end) {
-            const size_t offset = std::max(it->lower(), virtual_addr);
-            const size_t protect_length = std::min(it->upper(), virtual_end) - offset;
-            DWORD old_flags{};
-            if (!VirtualProtect(virtual_base + offset, protect_length, new_flags, &old_flags)) {
-                LOG_CRITICAL(Common_Memory, "Failed to change virtual memory protect rules");
-            }
-            ++it;
+        DWORD old_flags{};
+        bool success =
+            VirtualProtect(reinterpret_cast<LPVOID>(virtual_addr), size, new_flags, &old_flags);
+
+        if (!success) {
+            LOG_ERROR(Common_Memory,
+                      "Failed to change virtual memory protection for address {:#x}, size {}",
+                      virtual_addr, size);
         }
+
+        // Use assert to ensure success in debug builds
+        DEBUG_ASSERT(success && "Failed to change virtual memory protection");
     }
 
     HANDLE process{};
@@ -493,7 +503,10 @@ void AddressSpace::Unmap(VAddr virtual_addr, size_t size, VAddr start_in_vma, VA
 }
 
 void AddressSpace::Protect(VAddr virtual_addr, size_t size, MemoryPermission perms) {
-    return impl->Protect(virtual_addr, size, true, true, true);
+    const bool read = True(perms & MemoryPermission::Read);
+    const bool write = True(perms & MemoryPermission::Write);
+    const bool execute = True(perms & MemoryPermission::Execute);
+    return impl->Protect(virtual_addr, size, read, write, execute);
 }
 
 } // namespace Core
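Note: the hunk above only wires the read/write/execute flags into the Windows (VirtualProtect) backend. The POSIX backend in the same file is untouched by this diff; a counterpart would be a straightforward mprotect mapping along the lines of the sketch below (a sketch only, assuming the Unix Impl exposes the same Protect(VAddr, size_t, bool, bool, bool) entry point, that guest addresses equal host addresses as the Windows path assumes, and that <sys/mman.h> is available):

// Sketch, not part of this patch: POSIX equivalent of the Windows mapping above.
void Protect(VAddr virtual_addr, size_t size, bool read, bool write, bool execute) {
    int flags = PROT_NONE;
    if (read) {
        flags |= PROT_READ;
    }
    if (write) {
        flags |= PROT_WRITE;
    }
    if (execute) {
        flags |= PROT_EXEC;
    }
    if (mprotect(reinterpret_cast<void*>(virtual_addr), size, flags) != 0) {
        LOG_ERROR(Common_Memory, "Failed to change virtual memory protection for address {:#x}",
                  virtual_addr);
    }
}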
diff --git a/src/core/libraries/kernel/libkernel.cpp b/src/core/libraries/kernel/libkernel.cpp
index 2634e25c..d56f4dc4 100644
--- a/src/core/libraries/kernel/libkernel.cpp
+++ b/src/core/libraries/kernel/libkernel.cpp
@@ -454,6 +454,8 @@ void LibKernel_Register(Core::Loader::SymbolsResolver* sym) {
     LIB_FUNCTION("F6e0kwo4cnk", "libkernel", 1, "libkernel", 1, 1, sceKernelTriggerUserEvent);
     LIB_FUNCTION("LJDwdSNTnDg", "libkernel", 1, "libkernel", 1, 1, sceKernelDeleteUserEvent);
     LIB_FUNCTION("mJ7aghmgvfc", "libkernel", 1, "libkernel", 1, 1, sceKernelGetEventId);
+    LIB_FUNCTION("9bfdLIyuwCY", "libkernel", 1, "libkernel", 1, 1, sceKernelMTypeProtect);
+    LIB_FUNCTION("vSMAm3cxYTY", "libkernel", 1, "libkernel", 1, 1, sceKernelMProtect);
     LIB_FUNCTION("23CPPI1tyBY", "libkernel", 1, "libkernel", 1, 1, sceKernelGetEventFilter);
 
     // misc
diff --git a/src/core/libraries/kernel/memory_management.cpp b/src/core/libraries/kernel/memory_management.cpp
index a5288a65..08cd106d 100644
--- a/src/core/libraries/kernel/memory_management.cpp
+++ b/src/core/libraries/kernel/memory_management.cpp
@@ -7,6 +7,7 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/singleton.h"
+#include "core/address_space.h"
 #include "core/libraries/error_codes.h"
 #include "core/libraries/kernel/memory_management.h"
 #include "core/linker.h"
@@ -218,6 +219,19 @@ int PS4_SYSV_ABI sceKernelQueryMemoryProtection(void* addr, void** start, void**
     return memory->QueryProtection(std::bit_cast<VAddr>(addr), start, end, prot);
 }
 
+int PS4_SYSV_ABI sceKernelMProtect(const void* addr, size_t size, int prot) {
+    Core::MemoryManager* memory_manager = Core::Memory::Instance();
+    Core::MemoryProt protection_flags = static_cast<Core::MemoryProt>(prot);
+    return memory_manager->Protect(std::bit_cast<VAddr>(addr), size, protection_flags);
+}
+
+int PS4_SYSV_ABI sceKernelMTypeProtect(const void* addr, size_t size, int mtype, int prot) {
+    Core::MemoryManager* memory_manager = Core::Memory::Instance();
+    Core::MemoryProt protection_flags = static_cast<Core::MemoryProt>(prot);
+    return memory_manager->MTypeProtect(std::bit_cast<VAddr>(addr), size,
+                                        static_cast<Core::VMAType>(mtype), protection_flags);
+}
+
 int PS4_SYSV_ABI sceKernelDirectMemoryQuery(u64 offset, int flags, OrbisQueryInfo* query_info,
                                             size_t infoSize) {
     LOG_WARNING(Kernel_Vmm, "called offset = {:#x}, flags = {:#x}", offset, flags);
@@ -258,7 +272,7 @@ s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, int numEn
                                     int* numEntriesOut, int flags) {
     int result = ORBIS_OK;
     int processed = 0;
-    for (int i = 0; i < numEntries; i++, processed++) {
+    for (int i = 0; i < numEntries; i++) {
         if (entries == nullptr || entries[i].length == 0 || entries[i].operation > 4) {
             result = ORBIS_KERNEL_ERROR_EINVAL;
             break; // break and assign a value to numEntriesOut.
@@ -278,10 +292,41 @@ s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, int numEn
         }
         case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_UNMAP: {
             result = sceKernelMunmap(entries[i].start, entries[i].length);
-            LOG_INFO(Kernel_Vmm, "entry = {}, operation = {}, len = {:#x}, result = {}", i,
-                     entries[i].operation, entries[i].length, result);
+            LOG_INFO(Kernel_Vmm, "BatchMap: entry = {}, operation = {}, len = {:#x}, result = {}",
+                     i, entries[i].operation, entries[i].length, result);
+            if (result == 0)
+                processed++;
+            break;
+        }
+        case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_PROTECT: {
+            result = sceKernelMProtect(entries[i].start, entries[i].length, entries[i].protection);
+            LOG_INFO(Kernel_Vmm, "BatchMap: entry = {}, operation = {}, len = {:#x}, result = {}",
+                     i, entries[i].operation, entries[i].length, result);
+            if (result != ORBIS_OK) {
+                LOG_ERROR(Kernel_Vmm, "BatchMap: MProtect failed on entry {} with result {}", i,
+                          result);
+            }
+            if (result == 0) {
+                processed++;
+            }
             break;
         }
+
+        case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_TYPE_PROTECT: {
+            result = sceKernelMTypeProtect(entries[i].start, entries[i].length, entries[i].type,
+                                           entries[i].protection);
+            LOG_INFO(Kernel_Vmm, "BatchMap: entry = {}, operation = {}, len = {:#x}, result = {}",
+                     i, entries[i].operation, entries[i].length, result);
+            if (result != ORBIS_OK) {
+                LOG_ERROR(Kernel_Vmm, "BatchMap: MTypeProtect failed on entry {} with result {}",
+                          i, result);
+            }
+            if (result == 0) {
+                processed++;
+            }
+            break;
+        }
+
         case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_MAP_FLEXIBLE: {
             result = sceKernelMapNamedFlexibleMemory(&entries[i].start, entries[i].length,
                                                      entries[i].protection, flags, "");
@@ -291,14 +336,7 @@ s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, int numEn
                      i, entries[i].operation, entries[i].length,
                      (u8)entries[i].type, result);
             break;
         }
-        case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_TYPE_PROTECT: {
-            // By now, ignore protection and log it instead
-            LOG_WARNING(Kernel_Vmm,
-                        "entry = {}, operation = {}, len = {:#x}, type = {} "
-                        "is UNSUPPORTED and skipped",
-                        i, entries[i].operation, entries[i].length, (u8)entries[i].type);
-            break;
-        }
+
         default: {
             UNREACHABLE();
         }
@@ -308,6 +346,8 @@ s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, int numEn
             break;
         }
     }
+    LOG_INFO(Kernel_Vmm, "sceKernelBatchMap2 finished: processed = {}, result = {}", processed,
+             result);
     if (numEntriesOut != NULL) { // can be zero. do not return an error code.
         *numEntriesOut = processed;
     }
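With the OP_PROTECT and OP_TYPE_PROTECT branches in place, sceKernelBatchMap2 now reports success per entry: for the unmap and protect operations, processed only counts entries whose call returned 0, and the existing result check after the switch still stops the loop at the first failure. A hedged caller-side sketch of the resulting contract (the entries array, num_entries, and flags are assumed to be set up elsewhere):

// Sketch only: how a caller would interpret the batch result after this change.
int processed = 0;
const int result = sceKernelBatchMap2(entries, num_entries, &processed, flags);
if (result != ORBIS_OK) {
    // 'processed' is the number of leading entries that succeeded; the failing
    // entry is not counted, because processed++ only runs when result == 0.
    LOG_WARNING(Kernel_Vmm, "batch stopped after {} of {} entries", processed, num_entries);
}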
diff --git a/src/core/libraries/kernel/memory_management.h b/src/core/libraries/kernel/memory_management.h
index 761cb084..205b2274 100644
--- a/src/core/libraries/kernel/memory_management.h
+++ b/src/core/libraries/kernel/memory_management.h
@@ -95,6 +95,10 @@ s32 PS4_SYSV_ABI sceKernelMapFlexibleMemory(void** addr_in_out, std::size_t len,
                                             int flags);
 int PS4_SYSV_ABI sceKernelQueryMemoryProtection(void* addr, void** start, void** end, u32* prot);
 
+int PS4_SYSV_ABI sceKernelMProtect(const void* addr, size_t size, int prot);
+
+int PS4_SYSV_ABI sceKernelMTypeProtect(const void* addr, size_t size, int mtype, int prot);
+
 int PS4_SYSV_ABI sceKernelDirectMemoryQuery(u64 offset, int flags, OrbisQueryInfo* query_info,
                                             size_t infoSize);
 s32 PS4_SYSV_ABI sceKernelAvailableFlexibleMemorySize(size_t* sizeOut);
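The new declarations sit next to sceKernelQueryMemoryProtection, which makes a simple round-trip check possible. A hypothetical emulator-side snippet, assuming it lives in the same namespace as these declarations; the mapped_addr variable, the length, and the prot value 2 (MemoryProt::CpuReadWrite in memory.h further down) are illustrative only:

// Sketch only, not part of the patch: change protection, then read it back.
void* region_start = nullptr;
void* region_end = nullptr;
u32 region_prot = 0;
int rc = sceKernelMProtect(mapped_addr, 0x4000, 2); // 2 == Core::MemoryProt::CpuReadWrite
if (rc == ORBIS_OK) {
    rc = sceKernelQueryMemoryProtection(mapped_addr, &region_start, &region_end, &region_prot);
    // region_start/region_end describe the containing VMA, region_prot its current flags.
}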
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 64075147..44f96a00 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -7,6 +7,7 @@
 #include "core/libraries/error_codes.h"
 #include "core/libraries/kernel/memory_management.h"
 #include "core/memory.h"
+#include "video_core/renderer_vulkan/vk_instance.h"
 #include "video_core/renderer_vulkan/vk_rasterizer.h"
 
 namespace Core {
@@ -292,6 +293,118 @@ int MemoryManager::QueryProtection(VAddr addr, void** start, void** end, u32* pr
     return ORBIS_OK;
 }
 
+int MemoryManager::Protect(VAddr addr, size_t size, MemoryProt prot) {
+    std::scoped_lock lk{mutex};
+
+    // Find the virtual memory area that contains the specified address range.
+    auto it = FindVMA(addr);
+    if (it == vma_map.end() || !it->second.Contains(addr, size)) {
+        LOG_ERROR(Core, "Address range not mapped");
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    VirtualMemoryArea& vma = it->second;
+    if (vma.type == VMAType::Free) {
+        LOG_ERROR(Core, "Cannot change protection on free memory region");
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    // Validate protection flags
+    constexpr static MemoryProt valid_flags = MemoryProt::NoAccess | MemoryProt::CpuRead |
+                                              MemoryProt::CpuReadWrite | MemoryProt::GpuRead |
+                                              MemoryProt::GpuWrite | MemoryProt::GpuReadWrite;
+
+    MemoryProt invalid_flags = prot & ~valid_flags;
+    if (u32(invalid_flags) != 0 && u32(invalid_flags) != u32(MemoryProt::NoAccess)) {
+        LOG_ERROR(Core, "Invalid protection flags: prot = {:#x}, invalid flags = {:#x}", u32(prot),
+                  u32(invalid_flags));
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    // Change protection
+    vma.prot = prot;
+
+    // Set permissions
+    Core::MemoryPermission perms{};
+
+    if (True(prot & MemoryProt::CpuRead)) {
+        perms |= Core::MemoryPermission::Read;
+    }
+    if (True(prot & MemoryProt::CpuReadWrite)) {
+        perms |= Core::MemoryPermission::ReadWrite;
+    }
+    if (True(prot & MemoryProt::GpuRead)) {
+        perms |= Core::MemoryPermission::Read;
+    }
+    if (True(prot & MemoryProt::GpuWrite)) {
+        perms |= Core::MemoryPermission::Write;
+    }
+    if (True(prot & MemoryProt::GpuReadWrite)) {
+        perms |= Core::MemoryPermission::ReadWrite;
+    }
+
+    impl.Protect(addr, size, perms);
+
+    return ORBIS_OK;
+}
+
+int MemoryManager::MTypeProtect(VAddr addr, size_t size, VMAType mtype, MemoryProt prot) {
+    std::scoped_lock lk{mutex};
+
+    // Find the virtual memory area that contains the specified address range.
+    auto it = FindVMA(addr);
+    if (it == vma_map.end() || !it->second.Contains(addr, size)) {
+        LOG_ERROR(Core, "Address range not mapped");
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    VirtualMemoryArea& vma = it->second;
+
+    if (vma.type == VMAType::Free) {
+        LOG_ERROR(Core, "Cannot change protection on free memory region");
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    // Validate protection flags
+    constexpr static MemoryProt valid_flags = MemoryProt::NoAccess | MemoryProt::CpuRead |
+                                              MemoryProt::CpuReadWrite | MemoryProt::GpuRead |
+                                              MemoryProt::GpuWrite | MemoryProt::GpuReadWrite;
+
+    MemoryProt invalid_flags = prot & ~valid_flags;
+    if (u32(invalid_flags) != 0 && u32(invalid_flags) != u32(MemoryProt::NoAccess)) {
+        LOG_ERROR(Core, "Invalid protection flags: prot = {:#x}, invalid flags = {:#x}", u32(prot),
+                  u32(invalid_flags));
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    // Change type and protection
+    vma.type = mtype;
+    vma.prot = prot;
+
+    // Set permissions
+    Core::MemoryPermission perms{};
+
+    if (True(prot & MemoryProt::CpuRead)) {
+        perms |= Core::MemoryPermission::Read;
+    }
+    if (True(prot & MemoryProt::CpuReadWrite)) {
+        perms |= Core::MemoryPermission::ReadWrite;
+    }
+    if (True(prot & MemoryProt::GpuRead)) {
+        perms |= Core::MemoryPermission::Read;
+    }
+    if (True(prot & MemoryProt::GpuWrite)) {
+        perms |= Core::MemoryPermission::Write;
+    }
+    if (True(prot & MemoryProt::GpuReadWrite)) {
+        perms |= Core::MemoryPermission::ReadWrite;
+    }
+
+    impl.Protect(addr, size, perms);
+
+    return ORBIS_OK;
+}
+
 int MemoryManager::VirtualQuery(VAddr addr, int flags,
                                 ::Libraries::Kernel::OrbisVirtualQueryInfo* info) {
     std::scoped_lock lk{mutex};
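The MemoryProt-to-MemoryPermission conversion is duplicated verbatim between Protect and MTypeProtect above. A possible follow-up (not part of this diff) is to factor it into a small helper, roughly as below; ProtToPermission is a hypothetical name:

// Sketch of a shared helper; both Protect and MTypeProtect could call this
// instead of repeating the five if-blocks.
static MemoryPermission ProtToPermission(MemoryProt prot) {
    MemoryPermission perms{};
    if (True(prot & MemoryProt::CpuRead)) {
        perms |= MemoryPermission::Read;
    }
    if (True(prot & MemoryProt::CpuReadWrite)) {
        perms |= MemoryPermission::ReadWrite;
    }
    if (True(prot & MemoryProt::GpuRead)) {
        perms |= MemoryPermission::Read;
    }
    if (True(prot & MemoryProt::GpuWrite)) {
        perms |= MemoryPermission::Write;
    }
    if (True(prot & MemoryProt::GpuReadWrite)) {
        perms |= MemoryPermission::ReadWrite;
    }
    return perms;
}

With that in place the mapping only has to be updated once if MemoryProt ever gains new bits.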
diff --git a/src/core/memory.h b/src/core/memory.h
index 919995b0..d0935ffb 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -30,6 +30,7 @@ enum class MemoryProt : u32 {
     GpuWrite = 32,
     GpuReadWrite = 38,
 };
+DECLARE_ENUM_FLAG_OPERATORS(MemoryProt)
 
 enum class MemoryMapFlags : u32 {
     NoFlags = 0,
@@ -163,6 +164,10 @@ public:
     int QueryProtection(VAddr addr, void** start, void** end, u32* prot);
 
+    int Protect(VAddr addr, size_t size, MemoryProt prot);
+
+    int MTypeProtect(VAddr addr, size_t size, VMAType mtype, MemoryProt prot);
+
     int VirtualQuery(VAddr addr, int flags, ::Libraries::Kernel::OrbisVirtualQueryInfo* info);
 
     int DirectMemoryQuery(PAddr addr, bool find_next,