Mirror of https://github.com/shadps4-emu/shadPS4.git (synced 2025-01-01 12:46:01 +00:00)
Implemented sceKernelMTypeProtect and sceKernelMProtect (#387)
* Fixed ORBIS_KERNEL_MAP_OP_TYPE_PROTECT for sceKernelBatchMap2
* Fixed merge
* Changed the literal 4 to ORBIS_KERNEL_MAP_OP_TYPE_PROTECT
* Removed MProtect from AddressSpace
* Added MTypeProtect and moved MProtect to ORBIS_KERNEL_MAP_OP_PROTECT
* Changed Protect for Windows
* Reverted the previous function
* Fixed MTypeProtect and MProtect
* Removed logs that were breaking the build
* Fixed clang-format issues
* Fixed the order of MTypeProtect and MProtect in sceKernelBatchMap2
* Updated branch
* Fixed nits
* Updated submodules to latest commits
* Reverted ffmpeg
* Fixed the remaining nits
* Fixed clang formatting, DEBUG_ASSERT, and extra spacing
* Fix build issues
* Revert "Fix build issues" (this reverts commit 9185f96ec9)
* Changes for MemoryProt format
Parent commit: 0dd6e257c5 · This commit: b9c6093717
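In short, the commit wires up two new libkernel exports, sceKernelMProtect and sceKernelMTypeProtect, and routes them (and the matching sceKernelBatchMap2 operations) to new MemoryManager::Protect / MemoryManager::MTypeProtect implementations. A minimal caller-side sketch, based only on the declarations added in memory_management.h below, might look like this; the PS4_SYSV_ABI stub and the numeric prot/mtype values are illustrative assumptions, not values taken from this diff:

#include <cstddef>

#ifndef PS4_SYSV_ABI
#define PS4_SYSV_ABI // calling-convention macro from the emulator's headers; stubbed for this sketch
#endif

// Declarations as added to memory_management.h in this commit.
int PS4_SYSV_ABI sceKernelMProtect(const void* addr, size_t size, int prot);
int PS4_SYSV_ABI sceKernelMTypeProtect(const void* addr, size_t size, int mtype, int prot);

void ReprotectExample(void* mapping, size_t len) {
    // Make the range CPU read-only, then change its memory type and make it CPU read/write.
    // 0x01 / 0x02 and the mtype value 3 are placeholder values, not constants from the diff.
    if (sceKernelMProtect(mapping, len, 0x01) == 0) {
        sceKernelMTypeProtect(mapping, len, /*mtype=*/3, 0x02);
    }
}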
@@ -15,6 +15,7 @@
 #include <fcntl.h>
 #include <sys/mman.h>
 #endif
+#include "libraries/error_codes.h"
 
 #ifdef __APPLE__
 // Reserve space for the system address space using a zerofill section.
@@ -231,27 +232,36 @@ struct AddressSpace::Impl {
 
     void Protect(VAddr virtual_addr, size_t size, bool read, bool write, bool execute) {
         DWORD new_flags{};
-        if (read && write) {
+
+        if (read && write && execute) {
+            new_flags = PAGE_EXECUTE_READWRITE;
+        } else if (read && write) {
             new_flags = PAGE_READWRITE;
         } else if (read && !write) {
             new_flags = PAGE_READONLY;
-        } else if (!read && !write) {
+        } else if (execute && !read && not write) {
+            new_flags = PAGE_EXECUTE;
+        } else if (!read && !write && !execute) {
             new_flags = PAGE_NOACCESS;
         } else {
-            UNIMPLEMENTED_MSG("Protection flag combination read={} write={}", read, write);
+            LOG_CRITICAL(Common_Memory,
+                         "Unsupported protection flag combination for address {:#x}, size {}",
+                         virtual_addr, size);
+            return;
         }
 
-        const VAddr virtual_end = virtual_addr + size;
-        auto [it, end] = placeholders.equal_range({virtual_addr, virtual_end});
-        while (it != end) {
-            const size_t offset = std::max(it->lower(), virtual_addr);
-            const size_t protect_length = std::min(it->upper(), virtual_end) - offset;
-            DWORD old_flags{};
-            if (!VirtualProtect(virtual_base + offset, protect_length, new_flags, &old_flags)) {
-                LOG_CRITICAL(Common_Memory, "Failed to change virtual memory protect rules");
-            }
-            ++it;
-        }
+        DWORD old_flags{};
+        bool success =
+            VirtualProtect(reinterpret_cast<void*>(virtual_addr), size, new_flags, &old_flags);
+
+        if (!success) {
+            LOG_ERROR(Common_Memory,
+                      "Failed to change virtual memory protection for address {:#x}, size {}",
+                      virtual_addr, size);
+        }
+
+        // Use assert to ensure success in debug builds
+        DEBUG_ASSERT(success && "Failed to change virtual memory protection");
     }
 
     HANDLE process{};
@@ -493,7 +503,10 @@ void AddressSpace::Unmap(VAddr virtual_addr, size_t size, VAddr start_in_vma, VA
 }
 
 void AddressSpace::Protect(VAddr virtual_addr, size_t size, MemoryPermission perms) {
-    return impl->Protect(virtual_addr, size, true, true, true);
+    const bool read = True(perms & MemoryPermission::Read);
+    const bool write = True(perms & MemoryPermission::Write);
+    const bool execute = True(perms & MemoryPermission::Execute);
+    return impl->Protect(virtual_addr, size, read, write, execute);
 }
 
 } // namespace Core
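The Protect changes above are Windows-specific: the (read, write, execute) triple has to be collapsed into a single PAGE_* constant for VirtualProtect, and the old per-placeholder loop is replaced by one call covering the whole range. For comparison, a POSIX-style translation of the same triple is just a bitwise OR of PROT_* flags; the sketch below is illustrative only and is not the emulator's actual Unix path:

#include <sys/mman.h>

#include <cstddef>
#include <cstdio>

// Sketch: translate (read, write, execute) into mprotect() flags.
// addr is assumed to be page-aligned, as mprotect requires.
static void ProtectPosixSketch(void* addr, size_t size, bool read, bool write, bool execute) {
    int prot = PROT_NONE;
    if (read) {
        prot |= PROT_READ;
    }
    if (write) {
        prot |= PROT_WRITE;
    }
    if (execute) {
        prot |= PROT_EXEC;
    }
    if (mprotect(addr, size, prot) != 0) {
        std::perror("mprotect");
    }
}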
@@ -454,6 +454,8 @@ void LibKernel_Register(Core::Loader::SymbolsResolver* sym) {
     LIB_FUNCTION("F6e0kwo4cnk", "libkernel", 1, "libkernel", 1, 1, sceKernelTriggerUserEvent);
     LIB_FUNCTION("LJDwdSNTnDg", "libkernel", 1, "libkernel", 1, 1, sceKernelDeleteUserEvent);
     LIB_FUNCTION("mJ7aghmgvfc", "libkernel", 1, "libkernel", 1, 1, sceKernelGetEventId);
+    LIB_FUNCTION("9bfdLIyuwCY", "libkernel", 1, "libkernel", 1, 1, sceKernelMTypeProtect);
+    LIB_FUNCTION("vSMAm3cxYTY", "libkernel", 1, "libkernel", 1, 1, sceKernelMProtect);
     LIB_FUNCTION("23CPPI1tyBY", "libkernel", 1, "libkernel", 1, 1, sceKernelGetEventFilter);
 
     // misc
@@ -7,6 +7,7 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/singleton.h"
+#include "core/address_space.h"
 #include "core/libraries/error_codes.h"
 #include "core/libraries/kernel/memory_management.h"
 #include "core/linker.h"
@@ -218,6 +219,19 @@ int PS4_SYSV_ABI sceKernelQueryMemoryProtection(void* addr, void** start, void**
     return memory->QueryProtection(std::bit_cast<VAddr>(addr), start, end, prot);
 }
 
+int PS4_SYSV_ABI sceKernelMProtect(const void* addr, size_t size, int prot) {
+    Core::MemoryManager* memory_manager = Core::Memory::Instance();
+    Core::MemoryProt protection_flags = static_cast<Core::MemoryProt>(prot);
+    return memory_manager->Protect(std::bit_cast<VAddr>(addr), size, protection_flags);
+}
+
+int PS4_SYSV_ABI sceKernelMTypeProtect(const void* addr, size_t size, int mtype, int prot) {
+    Core::MemoryManager* memory_manager = Core::Memory::Instance();
+    Core::MemoryProt protection_flags = static_cast<Core::MemoryProt>(prot);
+    return memory_manager->MTypeProtect(std::bit_cast<VAddr>(addr), size,
+                                        static_cast<Core::VMAType>(mtype), protection_flags);
+}
+
 int PS4_SYSV_ABI sceKernelDirectMemoryQuery(u64 offset, int flags, OrbisQueryInfo* query_info,
                                             size_t infoSize) {
     LOG_WARNING(Kernel_Vmm, "called offset = {:#x}, flags = {:#x}", offset, flags);
@@ -258,7 +272,7 @@ s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, int numEn
                                     int* numEntriesOut, int flags) {
     int result = ORBIS_OK;
     int processed = 0;
-    for (int i = 0; i < numEntries; i++, processed++) {
+    for (int i = 0; i < numEntries; i++) {
         if (entries == nullptr || entries[i].length == 0 || entries[i].operation > 4) {
             result = ORBIS_KERNEL_ERROR_EINVAL;
             break; // break and assign a value to numEntriesOut.
@@ -278,10 +292,41 @@ s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, int numEn
         }
         case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_UNMAP: {
             result = sceKernelMunmap(entries[i].start, entries[i].length);
-            LOG_INFO(Kernel_Vmm, "entry = {}, operation = {}, len = {:#x}, result = {}", i,
-                     entries[i].operation, entries[i].length, result);
-            break;
-        }
+            LOG_INFO(Kernel_Vmm, "BatchMap: entry = {}, operation = {}, len = {:#x}, result = {}",
+                     i, entries[i].operation, entries[i].length, result);
+
+            if (result == 0)
+                processed++;
+        }
+        case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_PROTECT: {
+            result = sceKernelMProtect(entries[i].start, entries[i].length, entries[i].protection);
+            LOG_INFO(Kernel_Vmm, "BatchMap: entry = {}, operation = {}, len = {:#x}, result = {}",
+                     i, entries[i].operation, entries[i].length, result);
+            if (result != ORBIS_OK) {
+                LOG_ERROR(Kernel_Vmm, "BatchMap: MProtect failed on entry {} with result {}", i,
+                          result);
+            }
+            if (result == 0) {
+                processed++;
+            }
+            break;
+        }
+
+        case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_TYPE_PROTECT: {
+            result = sceKernelMTypeProtect(entries[i].start, entries[i].length, entries[i].type,
+                                           entries[i].protection);
+            LOG_INFO(Kernel_Vmm, "BatchMap: entry = {}, operation = {}, len = {:#x}, result = {}",
+                     i, entries[i].operation, entries[i].length, result);
+            if (result != ORBIS_OK) {
+                LOG_ERROR(Kernel_Vmm, "BatchMap: MProtect failed on entry {} with result {}", i,
+                          result);
+            }
+            if (result == 0) {
+                processed++;
+            }
+            break;
+        }
+
         case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_MAP_FLEXIBLE: {
             result = sceKernelMapNamedFlexibleMemory(&entries[i].start, entries[i].length,
                                                      entries[i].protection, flags, "");
@@ -291,14 +336,7 @@ s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, int numEn
                      i, entries[i].operation, entries[i].length, (u8)entries[i].type, result);
             break;
         }
-        case MemoryOpTypes::ORBIS_KERNEL_MAP_OP_TYPE_PROTECT: {
-            // By now, ignore protection and log it instead
-            LOG_WARNING(Kernel_Vmm,
-                        "entry = {}, operation = {}, len = {:#x}, type = {} "
-                        "is UNSUPPORTED and skipped",
-                        i, entries[i].operation, entries[i].length, (u8)entries[i].type);
-            break;
-        }
+
         default: {
             UNREACHABLE();
         }
@@ -308,6 +346,8 @@ s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, int numEn
             break;
         }
     }
+    LOG_INFO(Kernel_Vmm, "sceKernelBatchMap2 finished: processed = {}, result = {}", processed,
+             result);
     if (numEntriesOut != NULL) { // can be zero. do not return an error code.
         *numEntriesOut = processed;
     }
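With the handlers above in place, sceKernelBatchMap2 now services ORBIS_KERNEL_MAP_OP_PROTECT and ORBIS_KERNEL_MAP_OP_TYPE_PROTECT entries instead of skipping them, and an entry only counts as processed when its operation succeeds. A rough caller-side sketch follows; the real OrbisKernelBatchMapEntry layout and the operation constants live in the kernel headers, so the struct and the numeric values here are stand-ins (the diff itself only shows the fields start, length, protection, type and operation being read):

#include <cstddef>

// Stand-in for OrbisKernelBatchMapEntry; field names follow their usage in the diff,
// but the real layout, field order and operation values are assumptions.
struct BatchMapEntrySketch {
    void* start;
    size_t length;
    int protection;
    int type;
    int operation;
};

constexpr int kOpProtect = 2;     // assumed value of ORBIS_KERNEL_MAP_OP_PROTECT
constexpr int kOpTypeProtect = 4; // TYPE_PROTECT is operation 4 per the commit message

void BatchReprotectExample(void* a, void* b, size_t len) {
    BatchMapEntrySketch entries[2]{};
    entries[0] = {a, len, /*protection=*/0x02, /*type=*/0, kOpProtect};
    entries[1] = {b, len, /*protection=*/0x01, /*type=*/3, kOpTypeProtect};
    // These entries would be handed to sceKernelBatchMap2(entries, 2, &numOut, flags),
    // which dispatches them to sceKernelMProtect / sceKernelMTypeProtect as in the hunks above.
}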
@@ -95,6 +95,10 @@ s32 PS4_SYSV_ABI sceKernelMapFlexibleMemory(void** addr_in_out, std::size_t len,
                                             int flags);
 int PS4_SYSV_ABI sceKernelQueryMemoryProtection(void* addr, void** start, void** end, u32* prot);
 
+int PS4_SYSV_ABI sceKernelMProtect(const void* addr, size_t size, int prot);
+
+int PS4_SYSV_ABI sceKernelMTypeProtect(const void* addr, size_t size, int mtype, int prot);
+
 int PS4_SYSV_ABI sceKernelDirectMemoryQuery(u64 offset, int flags, OrbisQueryInfo* query_info,
                                             size_t infoSize);
 s32 PS4_SYSV_ABI sceKernelAvailableFlexibleMemorySize(size_t* sizeOut);
@@ -7,6 +7,7 @@
 #include "core/libraries/error_codes.h"
 #include "core/libraries/kernel/memory_management.h"
 #include "core/memory.h"
+#include "video_core/renderer_vulkan/vk_instance.h"
 #include "video_core/renderer_vulkan/vk_rasterizer.h"
 
 namespace Core {
@@ -292,6 +293,118 @@ int MemoryManager::QueryProtection(VAddr addr, void** start, void** end, u32* pr
     return ORBIS_OK;
 }
 
+int MemoryManager::Protect(VAddr addr, size_t size, MemoryProt prot) {
+    std::scoped_lock lk{mutex};
+
+    // Find the virtual memory area that contains the specified address range.
+    auto it = FindVMA(addr);
+    if (it == vma_map.end() || !it->second.Contains(addr, size)) {
+        LOG_ERROR(Core, "Address range not mapped");
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    VirtualMemoryArea& vma = it->second;
+    if (vma.type == VMAType::Free) {
+        LOG_ERROR(Core, "Cannot change protection on free memory region");
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    // Validate protection flags
+    constexpr static MemoryProt valid_flags = MemoryProt::NoAccess | MemoryProt::CpuRead |
+                                              MemoryProt::CpuReadWrite | MemoryProt::GpuRead |
+                                              MemoryProt::GpuWrite | MemoryProt::GpuReadWrite;
+
+    MemoryProt invalid_flags = prot & ~valid_flags;
+    if (u32(invalid_flags) != 0 && u32(invalid_flags) != u32(MemoryProt::NoAccess)) {
+        LOG_ERROR(Core, "Invalid protection flags: prot = {:#x}, invalid flags = {:#x}", u32(prot),
+                  u32(invalid_flags));
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    // Change protection
+    vma.prot = prot;
+
+    // Set permissions
+    Core::MemoryPermission perms{};
+
+    if (True(prot & MemoryProt::CpuRead)) {
+        perms |= Core::MemoryPermission::Read;
+    }
+    if (True(prot & MemoryProt::CpuReadWrite)) {
+        perms |= Core::MemoryPermission::ReadWrite;
+    }
+    if (True(prot & MemoryProt::GpuRead)) {
+        perms |= Core::MemoryPermission::Read;
+    }
+    if (True(prot & MemoryProt::GpuWrite)) {
+        perms |= Core::MemoryPermission::Write;
+    }
+    if (True(prot & MemoryProt::GpuReadWrite)) {
+        perms |= Core::MemoryPermission::ReadWrite;
+    }
+
+    impl.Protect(addr, size, perms);
+
+    return ORBIS_OK;
+}
+
+int MemoryManager::MTypeProtect(VAddr addr, size_t size, VMAType mtype, MemoryProt prot) {
+    std::scoped_lock lk{mutex};
+
+    // Find the virtual memory area that contains the specified address range.
+    auto it = FindVMA(addr);
+    if (it == vma_map.end() || !it->second.Contains(addr, size)) {
+        LOG_ERROR(Core, "Address range not mapped");
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    VirtualMemoryArea& vma = it->second;
+
+    if (vma.type == VMAType::Free) {
+        LOG_ERROR(Core, "Cannot change protection on free memory region");
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    // Validate protection flags
+    constexpr static MemoryProt valid_flags = MemoryProt::NoAccess | MemoryProt::CpuRead |
+                                              MemoryProt::CpuReadWrite | MemoryProt::GpuRead |
+                                              MemoryProt::GpuWrite | MemoryProt::GpuReadWrite;
+
+    MemoryProt invalid_flags = prot & ~valid_flags;
+    if (u32(invalid_flags) != 0 && u32(invalid_flags) != u32(MemoryProt::NoAccess)) {
+        LOG_ERROR(Core, "Invalid protection flags: prot = {:#x}, invalid flags = {:#x}", u32(prot),
+                  u32(invalid_flags));
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
+
+    // Change type and protection
+    vma.type = mtype;
+    vma.prot = prot;
+
+    // Set permissions
+    Core::MemoryPermission perms{};
+
+    if (True(prot & MemoryProt::CpuRead)) {
+        perms |= Core::MemoryPermission::Read;
+    }
+    if (True(prot & MemoryProt::CpuReadWrite)) {
+        perms |= Core::MemoryPermission::ReadWrite;
+    }
+    if (True(prot & MemoryProt::GpuRead)) {
+        perms |= Core::MemoryPermission::Read;
+    }
+    if (True(prot & MemoryProt::GpuWrite)) {
+        perms |= Core::MemoryPermission::Write;
+    }
+    if (True(prot & MemoryProt::GpuReadWrite)) {
+        perms |= Core::MemoryPermission::ReadWrite;
+    }
+
+    impl.Protect(addr, size, perms);
+
+    return ORBIS_OK;
+}
+
 int MemoryManager::VirtualQuery(VAddr addr, int flags,
                                 ::Libraries::Kernel::OrbisVirtualQueryInfo* info) {
     std::scoped_lock lk{mutex};
@@ -30,6 +30,7 @@ enum class MemoryProt : u32 {
     GpuWrite = 32,
     GpuReadWrite = 38,
 };
+DECLARE_ENUM_FLAG_OPERATORS(MemoryProt)
 
 enum class MemoryMapFlags : u32 {
     NoFlags = 0,
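DECLARE_ENUM_FLAG_OPERATORS(MemoryProt) is what makes expressions in the new MemoryManager code such as prot & ~valid_flags and True(prot & MemoryProt::CpuRead) compile for the scoped enum. The macro itself is defined in the emulator's common headers and is not part of this diff; the following is only a sketch of the kind of operators it typically provides:

#include <type_traits>

// Sketch of the bitwise operators a DECLARE_ENUM_FLAG_OPERATORS-style macro generates.
// ProtSketch is a stand-in enum; only GpuWrite = 32 is taken from the diff, the rest is assumed.
enum class ProtSketch : unsigned { NoAccess = 0, CpuRead = 1, GpuWrite = 32 };

constexpr ProtSketch operator|(ProtSketch a, ProtSketch b) {
    using T = std::underlying_type_t<ProtSketch>;
    return static_cast<ProtSketch>(static_cast<T>(a) | static_cast<T>(b));
}
constexpr ProtSketch operator&(ProtSketch a, ProtSketch b) {
    using T = std::underlying_type_t<ProtSketch>;
    return static_cast<ProtSketch>(static_cast<T>(a) & static_cast<T>(b));
}
constexpr ProtSketch operator~(ProtSketch a) {
    using T = std::underlying_type_t<ProtSketch>;
    return static_cast<ProtSketch>(~static_cast<T>(a));
}
// True() as used in MemoryManager::Protect simply asks whether any bit is set.
constexpr bool True(ProtSketch a) {
    return static_cast<std::underlying_type_t<ProtSketch>>(a) != 0;
}

static_assert(True((ProtSketch::CpuRead | ProtSketch::GpuWrite) & ProtSketch::GpuWrite));
static_assert(!True(ProtSketch::CpuRead & ProtSketch::GpuWrite));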
@@ -163,6 +164,10 @@ public:
 
     int QueryProtection(VAddr addr, void** start, void** end, u32* prot);
 
+    int Protect(VAddr addr, size_t size, MemoryProt prot);
+
+    int MTypeProtect(VAddr addr, size_t size, VMAType mtype, MemoryProt prot);
+
     int VirtualQuery(VAddr addr, int flags, ::Libraries::Kernel::OrbisVirtualQueryInfo* info);
 
     int DirectMemoryQuery(PAddr addr, bool find_next,