From 30f76ed9c2d15b98b45a51274d39538bd6b69fa6 Mon Sep 17 00:00:00 2001
From: yuzubot
Date: Tue, 26 Dec 2023 00:56:59 +0000
Subject: [PATCH] Merge PR 12466

---
 src/common/CMakeLists.txt                 |   2 +
 src/common/heap_tracker.cpp               | 385 ++++++++++++++++++++++
 src/common/heap_tracker.h                 | 103 ++++++
 src/common/host_memory.cpp                |  10 +-
 src/common/host_memory.h                  |  11 +-
 src/core/CMakeLists.txt                   |   1 +
 src/core/arm/dynarmic/arm_dynarmic.cpp    |  49 +++
 src/core/arm/dynarmic/arm_dynarmic.h      |  20 ++
 src/core/arm/dynarmic/arm_dynarmic_32.cpp |   5 +
 src/core/arm/dynarmic/arm_dynarmic_64.cpp |   5 +
 src/core/hle/kernel/k_page_table_base.cpp |  26 +-
 src/core/hle/kernel/k_page_table_base.h   |   3 +
 src/core/hle/kernel/k_process.cpp         |   6 +-
 src/core/memory.cpp                       | 104 ++++--
 src/core/memory.h                         |   7 +-
 src/tests/common/host_memory.cpp          |  99 +++---
 16 files changed, 742 insertions(+), 94 deletions(-)
 create mode 100644 src/common/heap_tracker.cpp
 create mode 100644 src/common/heap_tracker.h
 create mode 100644 src/core/arm/dynarmic/arm_dynarmic.cpp

diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index b58a7073f..8c57d47c6 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -64,6 +64,8 @@ add_library(common STATIC
     fs/path_util.cpp
     fs/path_util.h
     hash.h
+    heap_tracker.cpp
+    heap_tracker.h
     hex_util.cpp
     hex_util.h
     host_memory.cpp
diff --git a/src/common/heap_tracker.cpp b/src/common/heap_tracker.cpp
new file mode 100644
index 000000000..82a837f04
--- /dev/null
+++ b/src/common/heap_tracker.cpp
@@ -0,0 +1,385 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/assert.h"
+#include "common/heap_tracker.h"
+
+namespace Common {
+
+namespace {
+
+constexpr size_t MaxResidentMapCount = 0x8000;
+
+} // namespace
+
+HeapTracker::HeapTracker(Common::HostMemory& buffer) : m_buffer(buffer) {}
+
+HeapTracker::~HeapTracker() = default;
+
+void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
+                      MemoryPermission perm, bool is_separate_heap) {
+    // When mapping other memory, map pages immediately.
+    if (!is_separate_heap) {
+        m_buffer.Map(virtual_offset, host_offset, length, perm, false);
+        return;
+    }
+
+    {
+        // We are mapping part of a separate heap.
+        std::scoped_lock lk{m_lock};
+
+        auto* map = new SeparateHeapMap{
+            .vaddr = virtual_offset,
+            .paddr = host_offset,
+            .size = length,
+            .map_id = m_next_map_id++,
+            .tick = m_tick++,
+            .perm = perm,
+            .is_resident = false,
+        };
+
+        // Insert into mappings.
+        m_mappings.insert(*map);
+    }
+
+    // Finally, map.
+    this->DeferredMapSeparateHeap(virtual_offset);
+}
+
+void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
+    // If this is a separate heap...
+    if (is_separate_heap) {
+        std::scoped_lock lk{m_rebuild_lock, m_lock};
+
+        const SeparateHeapMap key{
+            .vaddr = virtual_offset,
+            .size = size,
+        };
+
+        // Split at the boundaries of the region we are removing.
+        this->SplitHeapMapLocked(virtual_offset);
+        this->SplitHeapMapLocked(virtual_offset + size);
+
+        // Erase all mappings in range.
+        auto it = m_mappings.find(key);
+        while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
+            // Get pointer to item.
+            SeparateHeapMap* const item = std::addressof(*it);
+
+            if (item->is_resident) {
+                // Unlink from resident tree.
+                m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
+
+                // Decrease reference count.
+                const auto count_it = m_resident_map_counts.find(item->map_id);
+                this->RemoveReferenceLocked(count_it, 1);
+            }
+
+            // Unlink from mapping tree and advance.
+            it = m_mappings.erase(it);
+
+            // Free the item.
+            delete item;
+        }
+    }
+
+    // Unmap pages.
+    m_buffer.Unmap(virtual_offset, size, false);
+}
+
+void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) {
+    // Ensure no rebuild occurs while reprotecting.
+    std::shared_lock lk{m_rebuild_lock};
+
+    // Split at the boundaries of the region we are reprotecting.
+    this->SplitHeapMap(virtual_offset, size);
+
+    // Declare tracking variables.
+    VAddr cur = virtual_offset;
+    VAddr end = virtual_offset + size;
+
+    while (cur < end) {
+        VAddr next = cur;
+        bool should_protect = false;
+
+        {
+            std::scoped_lock lk2{m_lock};
+
+            const SeparateHeapMap key{
+                .vaddr = next,
+            };
+
+            // Try to get the next mapping corresponding to this address.
+            const auto it = m_mappings.nfind_key(key);
+
+            if (it == m_mappings.end()) {
+                // There are no separate heap mappings remaining.
+                next = end;
+                should_protect = true;
+            } else if (it->vaddr == cur) {
+                // We are in range.
+                // Update permission bits.
+                it->perm = perm;
+
+                // Determine next address and whether we should protect.
+                next = cur + it->size;
+                should_protect = it->is_resident;
+            } else /* if (it->vaddr > cur) */ {
+                // We weren't in range, but there is a block coming up that will be.
+                next = it->vaddr;
+                should_protect = true;
+            }
+        }
+
+        // Clamp to end.
+        next = std::min(next, end);
+
+        // Reprotect, if we need to.
+        if (should_protect) {
+            m_buffer.Protect(cur, next - cur, perm);
+        }
+
+        // Advance.
+        cur = next;
+    }
+}
+
+bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
+    if (m_buffer.IsInVirtualRange(fault_address)) {
+        return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
+    }
+
+    return false;
+}
+
+bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
+    std::scoped_lock lk{m_lock};
+
+    while (this->IsEvictRequiredLocked()) {
+        // Unlock before we rebuild to ensure proper lock ordering.
+        m_lock.unlock();
+
+        // Evict four maps.
+        for (size_t i = 0; i < 4; /* ... */) {
+            i += this->EvictSingleSeparateHeapMap();
+        }
+
+        // Lock again.
+        m_lock.lock();
+    }
+
+    // Check to ensure this was a non-resident separate heap mapping.
+    const auto it = this->GetNearestHeapMapLocked(virtual_offset);
+    if (it == m_mappings.end()) {
+        // Not in any separate heap.
+        return false;
+    }
+    if (it->is_resident) {
+        // Already mapped and shouldn't be considered again.
+        return false;
+    }
+
+    // Map the area.
+    m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
+
+    // This map is now resident.
+    this->AddReferenceLocked(it->map_id, 1);
+    it->is_resident = true;
+    it->tick = m_tick++;
+
+    // Insert into resident maps.
+    m_resident_mappings.insert(*it);
+
+    // We succeeded.
+    return true;
+}
+
+bool HeapTracker::EvictSingleSeparateHeapMap() {
+    std::scoped_lock lk{m_rebuild_lock, m_lock};
+
+    ASSERT(!m_resident_mappings.empty());
+
+    // Select the item with the lowest tick to evict.
+    auto* const item = std::addressof(*m_resident_mappings.begin());
+    auto it = m_mappings.iterator_to(*item);
+
+    // Track the map ID.
+    const size_t map_id = it->map_id;
+
+    // Walk backwards until we find the first entry.
+    while (it != m_mappings.begin()) {
+        // If the previous element does not have the same map ID, stop.
+        const auto prev = std::prev(it);
+        if (prev->map_id != map_id) {
+            break;
+        }
+
+        // Continue.
+        it = prev;
+    }
+
+    // Track the begin and end address.
+    const VAddr begin_vaddr = it->vaddr;
+    VAddr end_vaddr = begin_vaddr;
+
+    // Get the count iterator.
+    const auto count_it = m_resident_map_counts.find(map_id);
+
+    // Declare whether we have erased an underlying mapping.
+    bool was_erased = false;
+
+    // Unmark and merge everything in range.
+    while (it != m_mappings.end() && it->map_id == map_id) {
+        if (it->is_resident) {
+            // Remove from resident tree.
+            m_resident_mappings.erase(m_resident_mappings.iterator_to(*it));
+            it->is_resident = false;
+
+            // Remove reference count.
+            was_erased |= this->RemoveReferenceLocked(count_it, 1);
+        }
+
+        // Update the end address.
+        end_vaddr = it->vaddr + it->size;
+
+        // Advance.
+        it = this->MergeHeapMapForEvictLocked(it);
+    }
+
+    // Finally, unmap.
+    ASSERT(end_vaddr >= begin_vaddr);
+    m_buffer.Unmap(begin_vaddr, end_vaddr - begin_vaddr, false);
+
+    // Return whether we actually removed a mapping.
+    // This will be true if there were no holes, which is likely.
+    return was_erased;
+}
+
+void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
+    std::scoped_lock lk{m_lock};
+
+    this->SplitHeapMapLocked(offset);
+    this->SplitHeapMapLocked(offset + size);
+}
+
+void HeapTracker::SplitHeapMapLocked(VAddr offset) {
+    const auto it = this->GetNearestHeapMapLocked(offset);
+    if (it == m_mappings.end() || it->vaddr == offset) {
+        // Not contained or no split required.
+        return;
+    }
+
+    // Get the underlying item as the left.
+    auto* const left = std::addressof(*it);
+
+    // Cache the original size values.
+    const size_t size = left->size;
+
+    // Adjust the left map.
+    const size_t left_size = offset - left->vaddr;
+    left->size = left_size;
+
+    // Create the new right map.
+    auto* const right = new SeparateHeapMap{
+        .vaddr = left->vaddr + left_size,
+        .paddr = left->paddr + left_size,
+        .size = size - left_size,
+        .map_id = left->map_id,
+        .tick = left->tick,
+        .perm = left->perm,
+        .is_resident = left->is_resident,
+    };
+
+    // Insert the new right map.
+    m_mappings.insert(*right);
+
+    // If the original map was not resident, we are done.
+    if (!left->is_resident) {
+        return;
+    }
+
+    // Update reference count.
+    this->AddReferenceLocked(left->map_id, 1);
+
+    // Insert right into resident map.
+    m_resident_mappings.insert(*right);
+}
+
+HeapTracker::AddrTree::iterator HeapTracker::MergeHeapMapForEvictLocked(AddrTree::iterator it) {
+    if (it == m_mappings.end()) {
+        // Not contained.
+        return it;
+    }
+
+    if (it == m_mappings.begin()) {
+        // Nothing to merge with.
+        return std::next(it);
+    }
+
+    // Get the left and right items.
+    auto* const right = std::addressof(*it);
+    auto* const left = std::addressof(*std::prev(it));
+
+    if (left->vaddr + left->size != right->vaddr) {
+        // Virtual range not contiguous, cannot merge.
+        return std::next(it);
+    }
+
+    if (left->paddr + left->size != right->paddr) {
+        // Physical range not contiguous, cannot merge.
+        return std::next(it);
+    }
+
+    if (left->perm != right->perm) {
+        // Permissions mismatch, cannot merge.
+        return std::next(it);
+    }
+
+    if (left->map_id != right->map_id) {
+        // Map ID mismatch, cannot merge.
+        return std::next(it);
+    }
+
+    // Merge size to the left.
+    left->size += right->size;
+
+    // Erase the right element.
+    const auto next_it = m_mappings.erase(it);
+
+    // Free the right element.
+    delete right;
+
+    // Return the iterator to the next position.
+    return next_it;
+}
+
+HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) {
+    const SeparateHeapMap key{
+        .vaddr = offset,
+    };
+
+    return m_mappings.find(key);
+}
+
+void HeapTracker::AddReferenceLocked(size_t map_id, size_t inc) {
+    m_resident_map_counts[map_id] += inc;
+}
+
+bool HeapTracker::RemoveReferenceLocked(MapCountTree::iterator it, size_t dec) {
+    ASSERT(it != m_resident_map_counts.end());
+    ASSERT(it->second >= dec);
+
+    it->second -= dec;
+    if (it->second == 0) {
+        m_resident_map_counts.erase(it);
+        return true;
+    }
+
+    return false;
+}
+
+bool HeapTracker::IsEvictRequiredLocked() {
+    return m_resident_map_counts.size() > MaxResidentMapCount;
+}
+
+} // namespace Common
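
For illustration, a minimal standalone sketch of the bookkeeping the tracker above implements: resident mappings are stamped with a monotonically increasing tick, and once the resident count exceeds the cap, the mapping with the lowest tick (the one touched least recently) is evicted first. This model uses std::map/std::set in place of the patch's intrusive trees, and all names in it are hypothetical:

// Sketch only; not part of the patch.
#include <cstddef>
#include <cstdint>
#include <map>
#include <set>
#include <utility>

class LruResidentSet {
public:
    explicit LruResidentSet(std::size_t max_resident) : m_max_resident{max_resident} {}

    // Mark a mapping resident, stamping it with the current tick (like
    // DeferredMapSeparateHeap does), evicting the oldest entries if needed.
    void Touch(std::uint64_t vaddr) {
        if (const auto it = m_tick_by_vaddr.find(vaddr); it != m_tick_by_vaddr.end()) {
            m_lru.erase({it->second, vaddr});
        }
        const std::uint64_t tick = m_tick++;
        m_tick_by_vaddr[vaddr] = tick;
        m_lru.insert({tick, vaddr});
        while (m_tick_by_vaddr.size() > m_max_resident) {
            EvictOldest();
        }
    }

private:
    void EvictOldest() {
        // The set orders by (tick, vaddr), so begin() is the least recently used,
        // mirroring m_resident_mappings.begin() under SeparateHeapMapTickComparator.
        const auto [tick, vaddr] = *m_lru.begin();
        m_lru.erase(m_lru.begin());
        m_tick_by_vaddr.erase(vaddr);
        // A real tracker would also unmap the evicted range here.
    }

    std::size_t m_max_resident;
    std::uint64_t m_tick{};
    std::map<std::uint64_t, std::uint64_t> m_tick_by_vaddr; // vaddr -> last tick
    std::set<std::pair<std::uint64_t, std::uint64_t>> m_lru; // (tick, vaddr), oldest first
};
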
diff --git a/src/common/heap_tracker.h b/src/common/heap_tracker.h
new file mode 100644
index 000000000..72829565a
--- /dev/null
+++ b/src/common/heap_tracker.h
@@ -0,0 +1,103 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <map>
+#include <mutex>
+#include <shared_mutex>
+
+#include "common/host_memory.h"
+#include "common/intrusive_red_black_tree.h"
+
+namespace Common {
+
+struct SeparateHeapMap {
+    Common::IntrusiveRedBlackTreeNode addr_node{};
+    Common::IntrusiveRedBlackTreeNode tick_node{};
+    VAddr vaddr{};
+    PAddr paddr{};
+    size_t size{};
+    size_t map_id{};
+    size_t tick{};
+    MemoryPermission perm{};
+    bool is_resident{};
+};
+
+struct SeparateHeapMapAddrComparator {
+    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
+        if (lhs.vaddr < rhs.vaddr) {
+            return -1;
+        } else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) {
+            return 0;
+        } else {
+            return 1;
+        }
+    }
+};
+
+struct SeparateHeapMapTickComparator {
+    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
+        if (lhs.tick < rhs.tick) {
+            return -1;
+        } else if (lhs.tick > rhs.tick) {
+            return 1;
+        } else {
+            return SeparateHeapMapAddrComparator::Compare(lhs, rhs);
+        }
+    }
+};
+
+class HeapTracker {
+public:
+    explicit HeapTracker(Common::HostMemory& buffer);
+    ~HeapTracker();
+
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm,
+             bool is_separate_heap);
+    void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap);
+    void Protect(size_t virtual_offset, size_t length, MemoryPermission perm);
+    u8* VirtualBasePointer() {
+        return m_buffer.VirtualBasePointer();
+    }
+
+    bool DeferredMapSeparateHeap(u8* fault_address);
+    bool DeferredMapSeparateHeap(size_t virtual_offset);
+
+private:
+    Common::HostMemory& m_buffer;
+
+    std::shared_mutex m_rebuild_lock{};
+    std::mutex m_lock{};
+    size_t m_next_map_id{};
+    size_t m_tick{};
+
+private:
+    using AddrTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::addr_node>;
+    using AddrTree = AddrTreeTraits::TreeType<SeparateHeapMapAddrComparator>;
+
+    using TickTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::tick_node>;
+    using TickTree = TickTreeTraits::TreeType<SeparateHeapMapTickComparator>;
+
+    using MapCountTree = std::map<size_t, size_t>;
+
+    MapCountTree m_resident_map_counts{};
+    AddrTree m_mappings{};
+    TickTree m_resident_mappings{};
+
+private:
+    void SplitHeapMap(VAddr offset, size_t size);
+    void SplitHeapMapLocked(VAddr offset);
+
+    AddrTree::iterator MergeHeapMapForEvictLocked(AddrTree::iterator cur);
+    AddrTree::iterator GetNearestHeapMapLocked(VAddr offset);
+
+    bool EvictSingleSeparateHeapMap();
+
+    void AddReferenceLocked(size_t map_id, size_t inc);
+    bool RemoveReferenceLocked(MapCountTree::iterator it, size_t dec);
+    bool IsEvictRequiredLocked();
+};
+
+} // namespace Common
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index e540375b8..860c39e6a 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -679,7 +679,7 @@ HostMemory::HostMemory(HostMemory&&) noexcept = default;
 HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;
 
 void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
-                     MemoryPermission perms) {
+                     MemoryPermission perms, bool separate_heap) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(host_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
@@ -691,7 +691,7 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
     impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms);
 }
 
-void HostMemory::Unmap(size_t virtual_offset, size_t length) {
+void HostMemory::Unmap(size_t virtual_offset, size_t length, bool separate_heap) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
     ASSERT(virtual_offset + length <= virtual_size);
@@ -701,14 +701,16 @@ void HostMemory::Unmap(size_t virtual_offset, size_t length) {
     impl->Unmap(virtual_offset + virtual_base_offset, length);
 }
 
-void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write,
-                         bool execute) {
+void HostMemory::Protect(size_t virtual_offset, size_t length, MemoryPermission perm) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
     ASSERT(virtual_offset + length <= virtual_size);
     if (length == 0 || !virtual_base || !impl) {
         return;
     }
+    const bool read = True(perm & MemoryPermission::Read);
+    const bool write = True(perm & MemoryPermission::Write);
+    const bool execute = True(perm & MemoryPermission::Execute);
     impl->Protect(virtual_offset + virtual_base_offset, length, read, write, execute);
 }
 
diff --git a/src/common/host_memory.h b/src/common/host_memory.h
index 747c5850c..72fbb05af 100644
--- a/src/common/host_memory.h
+++ b/src/common/host_memory.h
@@ -40,11 +40,12 @@ public:
     HostMemory(HostMemory&& other) noexcept;
     HostMemory& operator=(HostMemory&& other) noexcept;
 
-    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms);
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms,
+             bool separate_heap);
 
-    void Unmap(size_t virtual_offset, size_t length);
+    void Unmap(size_t virtual_offset, size_t length, bool separate_heap);
 
-    void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute = false);
+    void Protect(size_t virtual_offset, size_t length, MemoryPermission perms);
 
     void EnableDirectMappedAddress();
 
@@ -64,6 +65,10 @@ public:
         return virtual_base;
     }
 
+    bool IsInVirtualRange(void* address) const noexcept {
+        return address >= virtual_base && address < virtual_base + virtual_size;
+    }
+
 private:
     size_t backing_size{};
     size_t virtual_size{};
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 96ab39cb8..e960edb47 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -978,6 +978,7 @@ endif()
 
 if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
     target_sources(core PRIVATE
+        arm/dynarmic/arm_dynarmic.cpp
         arm/dynarmic/arm_dynarmic.h
         arm/dynarmic/arm_dynarmic_64.cpp
         arm/dynarmic/arm_dynarmic_64.h
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp
new file mode 100644
index 000000000..e6e9fc45b
--- /dev/null
+++ b/src/core/arm/dynarmic/arm_dynarmic.cpp
@@ -0,0 +1,49 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifdef __linux__
+
+#include "common/signal_chain.h"
+
+#include "core/arm/dynarmic/arm_dynarmic.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/memory.h"
+
+namespace Core {
+
+namespace {
+
+thread_local Core::Memory::Memory* g_current_memory{};
+std::once_flag g_registered{};
+struct sigaction g_old_segv {};
+
+void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
+    if (g_current_memory && g_current_memory->InvalidateSeparateHeap(info->si_addr)) {
+        return;
+    }
+
+    return g_old_segv.sa_sigaction(sig, info, ctx);
+}
+
+} // namespace
+
+ScopedJitExecution::ScopedJitExecution(Kernel::KProcess* process) {
+    g_current_memory = std::addressof(process->GetMemory());
+}
+
+ScopedJitExecution::~ScopedJitExecution() {
+    g_current_memory = nullptr;
+}
+
+void ScopedJitExecution::RegisterHandler() {
+    std::call_once(g_registered, [] {
+        struct sigaction sa {};
+        sa.sa_sigaction = &HandleSigSegv;
+        sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
+        Common::SigAction(SIGSEGV, std::addressof(sa), std::addressof(g_old_segv));
+    });
+}
+
+} // namespace Core
+
+#endif
diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h
index eef7c3116..53dd18815 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.h
+++ b/src/core/arm/dynarmic/arm_dynarmic.h
@@ -26,4 +26,24 @@ constexpr HaltReason TranslateHaltReason(Dynarmic::HaltReason hr) {
     return static_cast<HaltReason>(hr);
 }
 
+#ifdef __linux__
+
+class ScopedJitExecution {
+public:
+    explicit ScopedJitExecution(Kernel::KProcess* process);
+    ~ScopedJitExecution();
+    static void RegisterHandler();
+};
+
+#else
+
+class ScopedJitExecution {
+public:
+    explicit ScopedJitExecution(Kernel::KProcess* process) {}
+    ~ScopedJitExecution() {}
+    static void RegisterHandler() {}
+};
+
+#endif
+
 } // namespace Core
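
For illustration, a minimal Linux-only sketch of the fault-driven mapping pattern that ScopedJitExecution and HeapTracker::DeferredMapSeparateHeap cooperate on above: memory is reserved inaccessible, the SIGSEGV handler makes the touched page accessible, and the kernel restarts the faulting instruction. This is a standalone demo with hypothetical names, assuming 4 KiB pages, with error handling trimmed for brevity:

// Sketch only; not part of the patch.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <csignal>
#include <sys/mman.h>

namespace {

std::uint8_t* g_base{};
constexpr std::size_t g_size = 0x4000;

void HandleSegv(int, siginfo_t* info, void*) {
    auto* const fault = static_cast<std::uint8_t*>(info->si_addr);
    if (fault >= g_base && fault < g_base + g_size) {
        // "Map" the faulting page on demand; a real tracker would also update
        // its mapping trees and eviction state here.
        const auto page = reinterpret_cast<std::uintptr_t>(fault) & ~std::uintptr_t{0xfff};
        mprotect(reinterpret_cast<void*>(page), 0x1000, PROT_READ | PROT_WRITE);
        return; // Execution resumes at the faulting instruction.
    }
    std::signal(SIGSEGV, SIG_DFL); // Not ours: fall back to the default action.
}

} // namespace

int main() {
    // Reserve the region inaccessible, like an unmapped separate-heap area.
    g_base = static_cast<std::uint8_t*>(
        mmap(nullptr, g_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));

    struct sigaction sa{};
    sa.sa_sigaction = HandleSegv;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &sa, nullptr);

    g_base[0x2345] = 7; // Faults once, is mapped by the handler, then succeeds.
    std::printf("%d\n", g_base[0x2345]);
}
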
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index c78cfd528..36478f722 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -331,11 +331,15 @@ bool ArmDynarmic32::IsInThumbMode() const {
 }
 
 HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Run());
 }
 
 HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Step());
 }
@@ -377,6 +381,7 @@ ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProc
       m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} {
     auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl();
     m_jit = MakeJit(&page_table_impl);
+    ScopedJitExecution::RegisterHandler();
 }
 
 ArmDynarmic32::~ArmDynarmic32() = default;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index f351b13d9..c811c8ad5 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -362,11 +362,15 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa
 }
 
 HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Run());
 }
 
 HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Step());
 }
@@ -406,6 +410,7 @@ ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProc
     auto& page_table = process->GetPageTable().GetBasePageTable();
     auto& page_table_impl = page_table.GetImpl();
     m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth());
+    ScopedJitExecution::RegisterHandler();
 }
 
 ArmDynarmic64::~ArmDynarmic64() = default;
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
index 423289145..8c1549559 100644
--- a/src/core/hle/kernel/k_page_table_base.cpp
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -434,7 +434,7 @@ Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool
 void KPageTableBase::Finalize() {
     auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
         if (Settings::IsFastmemEnabled()) {
-            m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
+            m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size, false);
         }
     };
 
@@ -5243,7 +5243,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
                     // Unmap.
                     R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false,
                                            unmap_properties,
-                                           OperationType::Unmap, true));
+                                           OperationType::UnmapPhysical, true));
                 }
 
                 // Check if we're done.
@@ -5326,7 +5326,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
                     // Map the pages.
                     R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages, cur_pg,
                                         map_properties,
-                                        OperationType::MapFirstGroup, false));
+                                        OperationType::MapFirstGroupPhysical, false));
                 }
             }
 
@@ -5480,7 +5480,7 @@ Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size)
                 // Unmap.
                 R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false,
-                                       unmap_properties, OperationType::Unmap, false));
+                                       unmap_properties, OperationType::UnmapPhysical, false));
             }
 
             // Check if we're done.
@@ -5655,7 +5655,10 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
     // or free them to the page list, and so it goes unused (along with page properties).
 
     switch (operation) {
-    case OperationType::Unmap: {
+    case OperationType::Unmap:
+    case OperationType::UnmapPhysical: {
+        const bool separate_heap = operation == OperationType::UnmapPhysical;
+
         // Ensure that any pages we track are closed on exit.
         KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
         SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
@@ -5664,7 +5667,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
         this->MakePageGroup(pages_to_close, virt_addr, num_pages);
 
         // Unmap.
-        m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize);
+        m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize, separate_heap);
 
         R_SUCCEED();
     }
@@ -5672,7 +5675,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
         ASSERT(virt_addr != 0);
         ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
         m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr,
-                                  ConvertToMemoryPermission(properties.perm));
+                                  ConvertToMemoryPermission(properties.perm), false);
 
         // Open references to pages, if we should.
         if (this->IsHeapPhysicalAddress(phys_addr)) {
@@ -5711,16 +5714,19 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
 
     switch (operation) {
     case OperationType::MapGroup:
-    case OperationType::MapFirstGroup: {
+    case OperationType::MapFirstGroup:
+    case OperationType::MapFirstGroupPhysical: {
+        const bool separate_heap = operation == OperationType::MapFirstGroupPhysical;
+
         // We want to maintain a new reference to every page in the group.
-        KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
+        KScopedPageGroup spg(page_group, operation == OperationType::MapGroup);
 
         for (const auto& node : page_group) {
             const size_t size{node.GetNumPages() * PageSize};
 
             // Map the pages.
             m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(),
-                                      ConvertToMemoryPermission(properties.perm));
+                                      ConvertToMemoryPermission(properties.perm), separate_heap);
 
             virt_addr += size;
         }
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h
index 556d230b3..077cafc96 100644
--- a/src/core/hle/kernel/k_page_table_base.h
+++ b/src/core/hle/kernel/k_page_table_base.h
@@ -104,6 +104,9 @@ protected:
         ChangePermissionsAndRefresh = 5,
         ChangePermissionsAndRefreshAndFlush = 6,
         Separate = 7,
+
+        MapFirstGroupPhysical = 65000,
+        UnmapPhysical = 65001,
     };
 
     static constexpr size_t MaxPhysicalMapAlignment = 1_GiB;
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index d6869c228..068e71dff 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -1237,8 +1237,10 @@ void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
     auto& buffer = m_kernel.System().DeviceMemory().buffer;
     const auto& code = code_set.CodeSegment();
     const auto& patch = code_set.PatchSegment();
-    buffer.Protect(GetInteger(base_addr + code.addr), code.size, true, true, true);
-    buffer.Protect(GetInteger(base_addr + patch.addr), patch.size, true, true, true);
+    buffer.Protect(GetInteger(base_addr + code.addr), code.size,
+                   Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
+    buffer.Protect(GetInteger(base_addr + patch.addr), patch.size,
+                   Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
     ReprotectSegment(code_set.PatchSegment(), Svc::MemoryPermission::None);
 }
 #endif
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index c7eb32c19..1c84fa29e 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -10,6 +10,7 @@
 #include "common/assert.h"
 #include "common/atomic_ops.h"
 #include "common/common_types.h"
+#include "common/heap_tracker.h"
 #include "common/logging/log.h"
 #include "common/page_table.h"
 #include "common/scope_exit.h"
@@ -52,10 +53,18 @@ struct Memory::Impl {
         } else {
             current_page_table->fastmem_arena = nullptr;
         }
+
+#ifdef __linux__
+        heap_tracker.emplace(system.DeviceMemory().buffer);
+        buffer = std::addressof(*heap_tracker);
+#else
+        buffer = std::addressof(system.DeviceMemory().buffer);
+#endif
     }
 
     void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                         Common::PhysicalAddress target, Common::MemoryPermission perms) {
+                         Common::PhysicalAddress target, Common::MemoryPermission perms,
+                         bool separate_heap) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
         ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
@@ -64,19 +73,20 @@ struct Memory::Impl {
                  Common::PageType::Memory);
 
         if (current_page_table->fastmem_arena) {
-            system.DeviceMemory().buffer.Map(GetInteger(base),
-                                             GetInteger(target) - DramMemoryMap::Base, size, perms);
+            buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms,
+                        separate_heap);
         }
     }
 
-    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
+    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                     bool separate_heap) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
         MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
                  Common::PageType::Unmapped);
 
         if (current_page_table->fastmem_arena) {
-            system.DeviceMemory().buffer.Unmap(GetInteger(base), size);
+            buffer->Unmap(GetInteger(base), size, separate_heap);
         }
     }
 
@@ -89,11 +99,6 @@ struct Memory::Impl {
             return;
         }
 
-        const bool is_r = True(perms & Common::MemoryPermission::Read);
-        const bool is_w = True(perms & Common::MemoryPermission::Write);
-        const bool is_x =
-            True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled();
-
         u64 protect_bytes{};
         u64 protect_begin{};
         for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) {
@@ -102,8 +107,7 @@ struct Memory::Impl {
             switch (page_type) {
             case Common::PageType::RasterizerCachedMemory:
                 if (protect_bytes > 0) {
-                    system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w,
-                                                         is_x);
+                    buffer->Protect(protect_begin, protect_bytes, perms);
                     protect_bytes = 0;
                 }
                 break;
@@ -116,7 +120,7 @@ struct Memory::Impl {
         }
 
         if (protect_bytes > 0) {
-            system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, is_x);
+            buffer->Protect(protect_begin, protect_bytes, perms);
         }
     }
 
@@ -486,7 +490,9 @@ struct Memory::Impl {
         }
 
         if (current_page_table->fastmem_arena) {
-            system.DeviceMemory().buffer.Protect(vaddr, size, !debug, !debug);
+            const auto perm{debug ? Common::MemoryPermission{}
+                                  : Common::MemoryPermission::ReadWrite};
+            buffer->Protect(vaddr, size, perm);
         }
 
         // Iterate over a contiguous CPU address space, marking/unmarking the region.
@@ -543,9 +549,14 @@ struct Memory::Impl {
         }
 
         if (current_page_table->fastmem_arena) {
-            const bool is_read_enable =
-                !Settings::values.use_reactive_flushing.GetValue() || !cached;
-            system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
+            Common::MemoryPermission perm{};
+            if (!Settings::values.use_reactive_flushing.GetValue() || !cached) {
+                perm |= Common::MemoryPermission::Read;
+            }
+            if (!cached) {
+                perm |= Common::MemoryPermission::Write;
+            }
+            buffer->Protect(vaddr, size, perm);
         }
 
         // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
@@ -719,6 +730,17 @@ struct Memory::Impl {
             GetInteger(vaddr), []() {}, []() {});
     }
 
+    void FixPageProtection(u64 vaddr) {
+        vaddr = Common::AlignDown(vaddr, YUZU_PAGESIZE);
+
+        if (!AddressSpaceContains(*current_page_table, vaddr, 1)) [[unlikely]] {
+            return;
+        }
+
+        ProtectRegion(*current_page_table, vaddr, YUZU_PAGESIZE,
+                      Common::MemoryPermission::ReadWrite);
+    }
+
     /**
      * Reads a particular data type out of memory at the given virtual address.
     *
@@ -856,6 +878,13 @@ struct Memory::Impl {
     std::array rasterizer_write_areas{};
     std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
     std::mutex sys_core_guard;
+
+    std::optional<Common::HeapTracker> heap_tracker;
+#ifdef __linux__
+    Common::HeapTracker* buffer{};
+#else
+    Common::HostMemory* buffer{};
+#endif
 };
 
 Memory::Memory(Core::System& system_) : system{system_} {
@@ -873,12 +902,14 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process) {
 }
 
 void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                             Common::PhysicalAddress target, Common::MemoryPermission perms) {
-    impl->MapMemoryRegion(page_table, base, size, target, perms);
+                             Common::PhysicalAddress target, Common::MemoryPermission perms,
+                             bool separate_heap) {
    impl->MapMemoryRegion(page_table, base, size, target, perms, separate_heap);
 }
 
-void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
-    impl->UnmapRegion(page_table, base, size);
+void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                         bool separate_heap) {
+    impl->UnmapRegion(page_table, base, size, separate_heap);
 }
 
 void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size,
@@ -1048,7 +1079,9 @@ void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
 }
 
 bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
-    bool mapped = true;
+    [[maybe_unused]] bool mapped = true;
+    [[maybe_unused]] bool rasterizer = false;
+
     u8* const ptr = impl->GetPointerImpl(
         GetInteger(vaddr),
         [&] {
@@ -1056,8 +1089,31 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
                       GetInteger(vaddr));
             mapped = false;
         },
-        [&] { impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); });
-    return mapped && ptr != nullptr;
+        [&] {
+            impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size);
+            rasterizer = true;
+        });
+
+    const bool mapping_exists = mapped && ptr != nullptr;
+
+#ifdef __linux__
+    if (mapping_exists && !rasterizer) {
+        if (!impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr))) {
+            // GPU may have raced reprotecting this page, try to fix it.
+            impl->FixPageProtection(GetInteger(vaddr));
+        }
+    }
+#endif
+
+    return mapping_exists;
+}
+
+bool Memory::InvalidateSeparateHeap(void* fault_address) {
+#ifdef __linux__
+    return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
+#else
+    return false;
+#endif
 }
 
 } // namespace Core::Memory
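
For illustration, the permission logic that the memory.cpp hunks above move from boolean arguments into Common::MemoryPermission flags, reduced to a standalone sketch. The enum and helpers here are stand-in redeclarations for the sketch, not the yuzu definitions:

// Sketch only; not part of the patch.
#include <cstdint>

enum class MemoryPermission : std::uint32_t {
    None = 0,
    Read = 1 << 0,
    Write = 1 << 1,
    ReadWrite = Read | Write,
    Execute = 1 << 2,
};

constexpr MemoryPermission operator|(MemoryPermission a, MemoryPermission b) {
    return static_cast<MemoryPermission>(static_cast<std::uint32_t>(a) |
                                         static_cast<std::uint32_t>(b));
}
constexpr MemoryPermission operator&(MemoryPermission a, MemoryPermission b) {
    return static_cast<MemoryPermission>(static_cast<std::uint32_t>(a) &
                                         static_cast<std::uint32_t>(b));
}
constexpr bool True(MemoryPermission p) {
    return static_cast<std::uint32_t>(p) != 0;
}

// The caching decision from RasterizerMarkRegionCached, as a pure function:
// a cached page is never writable, and reads stay enabled unless reactive
// flushing wants to observe them.
constexpr MemoryPermission CachedPagePerm(bool cached, bool reactive_flushing) {
    MemoryPermission perm{};
    if (!reactive_flushing || !cached) {
        perm = perm | MemoryPermission::Read;
    }
    if (!cached) {
        perm = perm | MemoryPermission::Write;
    }
    return perm;
}

static_assert(True(CachedPagePerm(false, true) & MemoryPermission::Write));
static_assert(!True(CachedPagePerm(true, true) & MemoryPermission::Read));
static_assert(True(CachedPagePerm(true, false) & MemoryPermission::Read));
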
diff --git a/src/core/memory.h b/src/core/memory.h
index c1879e78f..3e4d03f57 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -86,7 +86,8 @@ public:
      * @param perms The permissions to map the memory with.
      */
     void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                         Common::PhysicalAddress target, Common::MemoryPermission perms);
+                         Common::PhysicalAddress target, Common::MemoryPermission perms,
+                         bool separate_heap);
 
     /**
      * Unmaps a region of the emulated process address space.
@@ -95,7 +96,8 @@ public:
      * @param base The address to begin unmapping at.
      * @param size The amount of bytes to unmap.
      */
-    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size);
+    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                     bool separate_heap);
 
     /**
      * Protects a region of the emulated process address space with the new permissions.
@@ -486,6 +488,7 @@ public:
     void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);
     void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size);
     bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
+    bool InvalidateSeparateHeap(void* fault_address);
     void FlushRegion(Common::ProcessAddress dest_addr, size_t size);
 
 private:
diff --git a/src/tests/common/host_memory.cpp b/src/tests/common/host_memory.cpp
index 1a28e862b..cb040c942 100644
--- a/src/tests/common/host_memory.cpp
+++ b/src/tests/common/host_memory.cpp
@@ -12,6 +12,7 @@ using namespace Common::Literals;
 static constexpr size_t VIRTUAL_SIZE = 1ULL << 39;
 static constexpr size_t BACKING_SIZE = 4_GiB;
 static constexpr auto PERMS = Common::MemoryPermission::ReadWrite;
+static constexpr auto HEAP = false;
 
 TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") {
     { HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); }
@@ -20,7 +21,7 @@ TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") {
 
 TEST_CASE("HostMemory: Simple map", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x5000, 0x8000, 0x1000, PERMS);
+    mem.Map(0x5000, 0x8000, 0x1000, PERMS, HEAP);
 
     volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
     data[0] = 50;
@@ -29,8 +30,8 @@ TEST_CASE("HostMemory: Simple map", "[common]") {
 
 TEST_CASE("HostMemory: Simple mirror map", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x5000, 0x3000, 0x2000, PERMS);
-    mem.Map(0x8000, 0x4000, 0x1000, PERMS);
+    mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
+    mem.Map(0x8000, 0x4000, 0x1000, PERMS, HEAP);
 
     volatile u8* const mirror_a = mem.VirtualBasePointer() + 0x5000;
     volatile u8* const mirror_b = mem.VirtualBasePointer() + 0x8000;
@@ -40,116 +41,116 @@ TEST_CASE("HostMemory: Simple mirror map", "[common]") {
 
 TEST_CASE("HostMemory: Simple unmap", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x5000, 0x3000, 0x2000, PERMS);
+    mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
 
     volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
     data[75] = 50;
     REQUIRE(data[75] == 50);
 
-    mem.Unmap(0x5000, 0x2000);
+    mem.Unmap(0x5000, 0x2000, HEAP);
 }
 
 TEST_CASE("HostMemory: Simple unmap and remap", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x5000, 0x3000, 0x2000, PERMS);
+    mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
 
     volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
     data[0] = 50;
     REQUIRE(data[0] == 50);
 
-    mem.Unmap(0x5000, 0x2000);
+    mem.Unmap(0x5000, 0x2000, HEAP);
 
-    mem.Map(0x5000, 0x3000, 0x2000, PERMS);
+    mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
     REQUIRE(data[0] == 50);
 
-    mem.Map(0x7000, 0x2000, 0x5000, PERMS);
+    mem.Map(0x7000, 0x2000, 0x5000, PERMS, HEAP);
     REQUIRE(data[0x3000] == 50);
 }
 
 TEST_CASE("HostMemory: Niche allocation", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x0000, 0, 0x20000, PERMS);
-    mem.Unmap(0x0000, 0x4000);
-    mem.Map(0x1000, 0, 0x2000, PERMS);
-    mem.Map(0x3000, 0, 0x1000, PERMS);
-    mem.Map(0, 0, 0x1000, PERMS);
+    mem.Map(0x0000, 0, 0x20000, PERMS, HEAP);
+    mem.Unmap(0x0000, 0x4000, HEAP);
+    mem.Map(0x1000, 0, 0x2000, PERMS, HEAP);
+    mem.Map(0x3000, 0, 0x1000, PERMS, HEAP);
+    mem.Map(0, 0, 0x1000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Full unmap", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x8000, 0, 0x4000, PERMS);
-    mem.Unmap(0x8000, 0x4000);
-    mem.Map(0x6000, 0, 0x16000, PERMS);
+    mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
+    mem.Unmap(0x8000, 0x4000, HEAP);
+    mem.Map(0x6000, 0, 0x16000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Right out of bounds unmap", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x0000, 0, 0x4000, PERMS);
-    mem.Unmap(0x2000, 0x4000);
-    mem.Map(0x2000, 0x80000, 0x4000, PERMS);
+    mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
+    mem.Unmap(0x2000, 0x4000, HEAP);
+    mem.Map(0x2000, 0x80000, 0x4000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Left out of bounds unmap", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x8000, 0, 0x4000, PERMS);
-    mem.Unmap(0x6000, 0x4000);
-    mem.Map(0x8000, 0, 0x2000, PERMS);
+    mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
+    mem.Unmap(0x6000, 0x4000, HEAP);
+    mem.Map(0x8000, 0, 0x2000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Multiple placeholder unmap", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x0000, 0, 0x4000, PERMS);
-    mem.Map(0x4000, 0, 0x1b000, PERMS);
-    mem.Unmap(0x3000, 0x1c000);
-    mem.Map(0x3000, 0, 0x20000, PERMS);
+    mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
+    mem.Map(0x4000, 0, 0x1b000, PERMS, HEAP);
+    mem.Unmap(0x3000, 0x1c000, HEAP);
+    mem.Map(0x3000, 0, 0x20000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Unmap between placeholders", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x0000, 0, 0x4000, PERMS);
-    mem.Map(0x4000, 0, 0x4000, PERMS);
-    mem.Unmap(0x2000, 0x4000);
-    mem.Map(0x2000, 0, 0x4000, PERMS);
+    mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
+    mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
+    mem.Unmap(0x2000, 0x4000, HEAP);
+    mem.Map(0x2000, 0, 0x4000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Unmap to origin", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x4000, 0, 0x4000, PERMS);
-    mem.Map(0x8000, 0, 0x4000, PERMS);
-    mem.Unmap(0x4000, 0x4000);
-    mem.Map(0, 0, 0x4000, PERMS);
-    mem.Map(0x4000, 0, 0x4000, PERMS);
+    mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
+    mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
+    mem.Unmap(0x4000, 0x4000, HEAP);
+    mem.Map(0, 0, 0x4000, PERMS, HEAP);
+    mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Unmap to right", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x4000, 0, 0x4000, PERMS);
-    mem.Map(0x8000, 0, 0x4000, PERMS);
-    mem.Unmap(0x8000, 0x4000);
-    mem.Map(0x8000, 0, 0x4000, PERMS);
+    mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
+    mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
+    mem.Unmap(0x8000, 0x4000, HEAP);
+    mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Partial right unmap check bindings", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x4000, 0x10000, 0x4000, PERMS);
+    mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
 
     volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
     ptr[0x1000] = 17;
 
-    mem.Unmap(0x6000, 0x2000);
+    mem.Unmap(0x6000, 0x2000, HEAP);
 
     REQUIRE(ptr[0x1000] == 17);
 }
 
 TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x4000, 0x10000, 0x4000, PERMS);
+    mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
 
     volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
     ptr[0x3000] = 19;
     ptr[0x3fff] = 12;
 
-    mem.Unmap(0x4000, 0x2000);
+    mem.Unmap(0x4000, 0x2000, HEAP);
 
     REQUIRE(ptr[0x3000] == 19);
     REQUIRE(ptr[0x3fff] == 12);
@@ -157,13 +158,13 @@ TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") {
 
 TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x4000, 0x10000, 0x4000, PERMS);
+    mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
 
     volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
     ptr[0x0000] = 19;
     ptr[0x3fff] = 12;
 
-    mem.Unmap(0x1000, 0x2000);
+    mem.Unmap(0x1000, 0x2000, HEAP);
 
     REQUIRE(ptr[0x0000] == 19);
     REQUIRE(ptr[0x3fff] == 12);
@@ -171,14 +172,14 @@ TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") {
 
 TEST_CASE("HostMemory: Partial sparse middle unmap and check bindings", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x4000, 0x10000, 0x2000, PERMS);
-    mem.Map(0x6000, 0x20000, 0x2000, PERMS);
+    mem.Map(0x4000, 0x10000, 0x2000, PERMS, HEAP);
+    mem.Map(0x6000, 0x20000, 0x2000, PERMS, HEAP);
 
     volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
     ptr[0x0000] = 19;
     ptr[0x3fff] = 12;
 
-    mem.Unmap(0x5000, 0x2000);
+    mem.Unmap(0x5000, 0x2000, HEAP);
 
     REQUIRE(ptr[0x0000] == 19);
     REQUIRE(ptr[0x3fff] == 12);
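
For illustration, a hypothetical extra test showing the call shape of the new separate-heap parameter; it is not part of the patch. Plain HostMemory accepts and ignores the flag (deferred mapping only happens when a HeapTracker wraps the buffer on Linux), so this would behave like the eager-mapping tests above:

// Sketch only; not part of the patch.
TEST_CASE("HostMemory: Separate heap map call shape", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x5000, 0x8000, 0x1000, PERMS, true); // separate_heap = true

    volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
    data[0] = 50;
    REQUIRE(data[0] == 50);

    mem.Unmap(0x5000, 0x1000, true);
}
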