mirror of https://github.com/yuzu-emu/yuzu-android
synced 2025-06-08 15:00:57 -07:00

Compare commits: master ... android-19 (9 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 1837979d3d | |
| | 62b4a3292a | |
| | 7207321680 | |
| | d12d409d34 | |
| | a66f40b29e | |
| | 4e50342f2b | |
| | 9c89ccd761 | |
| | a393ec3a71 | |
| | 47e19d5003 | |

README.md (16 lines changed)
@@ -1,3 +1,19 @@
| Pull Request | Commit | Title | Author | Merged? |
|----|----|----|----|----|
| [12579](https://github.com/yuzu-emu/yuzu-android//pull/12579) | [`66ae60a9e`](https://github.com/yuzu-emu/yuzu-android//pull/12579/files) | Core: Implement Device Mapping & GPU SMMU | [FernandoS27](https://github.com/FernandoS27/) | Yes |
| [12610](https://github.com/yuzu-emu/yuzu-android//pull/12610) | [`200b371d1`](https://github.com/yuzu-emu/yuzu-android//pull/12610/files) | server_manager: respond to session close correctly | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12611](https://github.com/yuzu-emu/yuzu-android//pull/12611) | [`2f0b57ca1`](https://github.com/yuzu-emu/yuzu-android//pull/12611/files) | kernel: fix resource management issues | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12612](https://github.com/yuzu-emu/yuzu-android//pull/12612) | [`76880b84f`](https://github.com/yuzu-emu/yuzu-android//pull/12612/files) | fsp-srv: use program registry for SetCurrentProcess | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12652](https://github.com/yuzu-emu/yuzu-android//pull/12652) | [`2a0d707ce`](https://github.com/yuzu-emu/yuzu-android//pull/12652/files) | shader_recompiler: emulate 8-bit and 16-bit storage writes with cas loop | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12659](https://github.com/yuzu-emu/yuzu-android//pull/12659) | [`d94097478`](https://github.com/yuzu-emu/yuzu-android//pull/12659/files) | audio: fetch process object from handle table | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12665](https://github.com/yuzu-emu/yuzu-android//pull/12665) | [`bee22540a`](https://github.com/yuzu-emu/yuzu-android//pull/12665/files) | service: acc: Only save profiles when profiles have changed | [german77](https://github.com/german77/) | Yes |
| [12677](https://github.com/yuzu-emu/yuzu-android//pull/12677) | [`d4acdac16`](https://github.com/yuzu-emu/yuzu-android//pull/12677/files) | core: Support multiple modules per patcher | [GPUCode](https://github.com/GPUCode/) | Yes |

End of merge log. You can find the original README.md below the break.

-----

<!--
SPDX-FileCopyrightText: 2018 yuzu Emulator Project
SPDX-License-Identifier: GPL-2.0-or-later
@@ -770,8 +770,8 @@ void Java_org_yuzu_yuzu_1emu_NativeLibrary_initializeEmptyUserDirectory(JNIEnv*
ASSERT(user_id);
const auto user_save_data_path = FileSys::SaveDataFactory::GetFullPath(
EmulationSession::GetInstance().System(), vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser,
FileSys::SaveDataType::SaveData, 1, user_id->AsU128(), 0);
{}, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData, 1,
user_id->AsU128(), 0);
const auto full_path = Common::FS::ConcatPathSafe(nand_dir, user_save_data_path);
if (!Common::FS::CreateParentDirs(full_path)) {

@@ -878,7 +878,7 @@ jstring Java_org_yuzu_yuzu_1emu_NativeLibrary_getSavePath(JNIEnv* env, jobject j
FileSys::Mode::Read);
const auto user_save_data_path = FileSys::SaveDataFactory::GetFullPath(
system, vfsNandDir, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData,
{}, vfsNandDir, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData,
program_id, user_id->AsU128(), 0);
return ToJString(env, user_save_data_path);
}
@@ -8,8 +8,11 @@
#include "audio_core/sink/sink_stream.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/guest_memory.h"
#include "core/memory.h"
#include "core/hle/kernel/k_process.h"

namespace AudioCore {

using namespace std::literals;

@@ -25,7 +28,7 @@ DeviceSession::~DeviceSession() {
}

Result DeviceSession::Initialize(std::string_view name_, SampleFormat sample_format_,
u16 channel_count_, size_t session_id_, u32 handle_,
u16 channel_count_, size_t session_id_, Kernel::KProcess* handle_,
u64 applet_resource_user_id_, Sink::StreamType type_) {
if (stream) {
Finalize();

@@ -36,6 +39,7 @@ Result DeviceSession::Initialize(std::string_view name_, SampleFormat sample_for
channel_count = channel_count_;
session_id = session_id_;
handle = handle_;
handle->Open();
applet_resource_user_id = applet_resource_user_id_;
if (type == Sink::StreamType::In) {

@@ -54,6 +58,11 @@ void DeviceSession::Finalize() {
sink->CloseStream(stream);
stream = nullptr;
}
if (handle) {
handle->Close();
handle = nullptr;
}
}

void DeviceSession::Start() {

@@ -91,7 +100,7 @@ void DeviceSession::AppendBuffers(std::span<const AudioBuffer> buffers) {
stream->AppendBuffer(new_buffer, tmp_samples);
} else {
Core::Memory::CpuGuestMemory<s16, Core::Memory::GuestMemoryFlags::UnsafeRead> samples(
system.ApplicationMemory(), buffer.samples, buffer.size / sizeof(s16));
handle->GetMemory(), buffer.samples, buffer.size / sizeof(s16));
stream->AppendBuffer(new_buffer, samples);
}
}

@@ -100,7 +109,7 @@ void DeviceSession::AppendBuffers(std::span<const AudioBuffer> buffers) {
void DeviceSession::ReleaseBuffer(const AudioBuffer& buffer) const {
if (type == Sink::StreamType::In) {
auto samples{stream->ReleaseBuffer(buffer.size / sizeof(s16))};
system.ApplicationMemory().WriteBlockUnsafe(buffer.samples, samples.data(), buffer.size);
handle->GetMemory().WriteBlockUnsafe(buffer.samples, samples.data(), buffer.size);
}
}
|
@ -20,6 +20,10 @@ struct EventType;
|
||||
} // namespace Timing
|
||||
} // namespace Core
|
||||
|
||||
namespace Kernel {
|
||||
class KProcess;
|
||||
} // namespace Kernel
|
||||
|
||||
namespace AudioCore {
|
||||
|
||||
namespace Sink {
|
||||
@ -44,13 +48,13 @@ public:
|
||||
* @param sample_format - Sample format for this device's output.
|
||||
* @param channel_count - Number of channels for this device (2 or 6).
|
||||
* @param session_id - This session's id.
|
||||
* @param handle - Handle for this device session (unused).
|
||||
* @param handle - Process handle for this device session.
|
||||
* @param applet_resource_user_id - Applet resource user id for this device session (unused).
|
||||
* @param type - Type of this stream (Render, In, Out).
|
||||
* @return Result code for this call.
|
||||
*/
|
||||
Result Initialize(std::string_view name, SampleFormat sample_format, u16 channel_count,
|
||||
size_t session_id, u32 handle, u64 applet_resource_user_id,
|
||||
size_t session_id, Kernel::KProcess* handle, u64 applet_resource_user_id,
|
||||
Sink::StreamType type);
|
||||
|
||||
/**
|
||||
@ -137,8 +141,8 @@ private:
|
||||
u16 channel_count{};
|
||||
/// Session id of this device session
|
||||
size_t session_id{};
|
||||
/// Handle of this device session
|
||||
u32 handle{};
|
||||
/// Process handle of device memory owner
|
||||
Kernel::KProcess* handle{};
|
||||
/// Applet resource user id of this device session
|
||||
u64 applet_resource_user_id{};
|
||||
/// Total number of samples played by this device session
|
||||
|
@@ -57,7 +57,7 @@ Result System::IsConfigValid(const std::string_view device_name,
}

Result System::Initialize(std::string device_name, const AudioInParameter& in_params,
const u32 handle_, const u64 applet_resource_user_id_) {
Kernel::KProcess* handle_, const u64 applet_resource_user_id_) {
auto result{IsConfigValid(device_name, in_params)};
if (result.IsError()) {
return result;
@@ -19,7 +19,8 @@ class System;

namespace Kernel {
class KEvent;
}
class KProcess;
} // namespace Kernel

namespace AudioCore::AudioIn {

@@ -93,12 +94,12 @@ public:
*
* @param device_name - The name of the requested input device.
* @param in_params - Input parameters, see AudioInParameter.
* @param handle - Unused.
* @param handle - Process handle.
* @param applet_resource_user_id - Unused.
* @return Result code.
*/
Result Initialize(std::string device_name, const AudioInParameter& in_params, u32 handle,
u64 applet_resource_user_id);
Result Initialize(std::string device_name, const AudioInParameter& in_params,
Kernel::KProcess* handle, u64 applet_resource_user_id);

/**
* Start this system.

@@ -244,8 +245,8 @@ public:
private:
/// Core system
Core::System& system;
/// (Unused)
u32 handle{};
/// Process handle
Kernel::KProcess* handle{};
/// (Unused)
u64 applet_resource_user_id{};
/// Buffer event, signalled when a buffer is ready
@@ -48,8 +48,8 @@ Result System::IsConfigValid(std::string_view device_name,
return Service::Audio::ResultInvalidChannelCount;
}

Result System::Initialize(std::string device_name, const AudioOutParameter& in_params, u32 handle_,
u64 applet_resource_user_id_) {
Result System::Initialize(std::string device_name, const AudioOutParameter& in_params,
Kernel::KProcess* handle_, u64 applet_resource_user_id_) {
auto result = IsConfigValid(device_name, in_params);
if (result.IsError()) {
return result;
@@ -19,7 +19,8 @@ class System;

namespace Kernel {
class KEvent;
}
class KProcess;
} // namespace Kernel

namespace AudioCore::AudioOut {

@@ -84,12 +85,12 @@ public:
*
* @param device_name - The name of the requested output device.
* @param in_params - Input parameters, see AudioOutParameter.
* @param handle - Unused.
* @param handle - Process handle.
* @param applet_resource_user_id - Unused.
* @return Result code.
*/
Result Initialize(std::string device_name, const AudioOutParameter& in_params, u32 handle,
u64 applet_resource_user_id);
Result Initialize(std::string device_name, const AudioOutParameter& in_params,
Kernel::KProcess* handle, u64 applet_resource_user_id);

/**
* Start this system.

@@ -228,8 +229,8 @@ public:
private:
/// Core system
Core::System& system;
/// (Unused)
u32 handle{};
/// Process handle
Kernel::KProcess* handle{};
/// (Unused)
u64 applet_resource_user_id{};
/// Buffer event, signalled when a buffer is ready
@@ -9,6 +9,7 @@
#include "common/fixed_point.h"
#include "common/logging/log.h"
#include "common/scratch_buffer.h"
#include "core/guest_memory.h"
#include "core/memory.h"

namespace AudioCore::Renderer {
@@ -45,6 +45,7 @@ using f32 = float; ///< 32-bit floating point
using f64 = double; ///< 64-bit floating point

using VAddr = u64; ///< Represents a pointer in the userspace virtual address space.
using DAddr = u64; ///< Represents a pointer in the device specific virtual address space.
using PAddr = u64; ///< Represents a pointer in the ARM11 physical address space.
using GPUVAddr = u64; ///< Represents a pointer in the GPU virtual address space.
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/page_table.h"
#include "common/scope_exit.h"

namespace Common {

@@ -11,29 +12,10 @@ PageTable::~PageTable() noexcept = default;

bool PageTable::BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
Common::ProcessAddress address) const {
// Setup invalid defaults.
out_entry->phys_addr = 0;
out_entry->block_size = page_size;
out_context->next_page = 0;
out_context->next_offset = GetInteger(address);
out_context->next_page = address / page_size;

// Validate that we can read the actual entry.
const auto page = address / page_size;
if (page >= backing_addr.size()) {
return false;
}

// Validate that the entry is mapped.
const auto phys_addr = backing_addr[page];
if (phys_addr == 0) {
return false;
}

// Populate the results.
out_entry->phys_addr = phys_addr + GetInteger(address);
out_context->next_page = page + 1;
out_context->next_offset = GetInteger(address) + page_size;

return true;
return this->ContinueTraversal(out_entry, out_context);
}

bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const {

@@ -41,6 +23,12 @@ bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* c
out_entry->phys_addr = 0;
out_entry->block_size = page_size;

// Regardless of whether the page was mapped, advance on exit.
SCOPE_EXIT({
context->next_page += 1;
context->next_offset += page_size;
});

// Validate that we can read the actual entry.
const auto page = context->next_page;
if (page >= backing_addr.size()) {

@@ -55,8 +43,6 @@ bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* c

// Populate the results.
out_entry->phys_addr = phys_addr + context->next_offset;
context->next_page = page + 1;
context->next_offset += page_size;

return true;
}
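The rewritten `BeginTraversal` now only seeds the traversal context (`next_page`, `next_offset`) and defers the bounds and mapping checks to `ContinueTraversal`, which advances the context on every call via `SCOPE_EXIT`. A minimal caller-side sketch, assuming the `Common::PageTable` interface shown in the hunks above; the `WalkRegion` helper and its callback are illustrative, not part of the diff:

```cpp
#include <algorithm>
#include "common/page_table.h"

// Hypothetical helper: visit the physical blocks backing [address, address + size).
// Returns false if an unmapped page is encountered before the range is covered.
template <typename OnBlock>
bool WalkRegion(const Common::PageTable& table, Common::ProcessAddress address, size_t size,
                OnBlock&& on_block) {
    Common::PageTable::TraversalEntry entry{};
    Common::PageTable::TraversalContext context{};

    // BeginTraversal seeds the context and performs the first lookup.
    if (!table.BeginTraversal(&entry, &context, address)) {
        return false; // First page is not mapped.
    }

    size_t remaining = size;
    while (true) {
        on_block(entry.phys_addr, std::min(remaining, entry.block_size));
        if (remaining <= entry.block_size) {
            return true;
        }
        remaining -= entry.block_size;

        // ContinueTraversal advances next_page/next_offset even when it fails,
        // so a later retry starts at the following page.
        if (!table.ContinueTraversal(&entry, &context)) {
            return false; // Ran into an unmapped page.
        }
    }
}
```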
@@ -37,6 +37,8 @@ add_library(core STATIC
debugger/gdbstub_arch.h
debugger/gdbstub.cpp
debugger/gdbstub.h
device_memory_manager.h
device_memory_manager.inc
device_memory.cpp
device_memory.h
file_sys/fssystem/fs_i_storage.h

@@ -490,6 +492,10 @@ add_library(core STATIC
hle/service/filesystem/fsp_pr.h
hle/service/filesystem/fsp_srv.cpp
hle/service/filesystem/fsp_srv.h
hle/service/filesystem/romfs_controller.cpp
hle/service/filesystem/romfs_controller.h
hle/service/filesystem/save_data_controller.cpp
hle/service/filesystem/save_data_controller.h
hle/service/fgm/fgm.cpp
hle/service/fgm/fgm.h
hle/service/friend/friend.cpp

@@ -605,6 +611,8 @@ add_library(core STATIC
hle/service/ns/pdm_qry.h
hle/service/nvdrv/core/container.cpp
hle/service/nvdrv/core/container.h
hle/service/nvdrv/core/heap_mapper.cpp
hle/service/nvdrv/core/heap_mapper.h
hle/service/nvdrv/core/nvmap.cpp
hle/service/nvdrv/core/nvmap.h
hle/service/nvdrv/core/syncpoint_manager.cpp
@@ -22,14 +22,10 @@ using NativeExecutionParameters = Kernel::KThread::NativeExecutionParameters;
constexpr size_t MaxRelativeBranch = 128_MiB;
constexpr u32 ModuleCodeIndex = 0x24 / sizeof(u32);

Patcher::Patcher() : c(m_patch_instructions) {}

Patcher::~Patcher() = default;

void Patcher::PatchText(const Kernel::PhysicalMemory& program_image,
const Kernel::CodeSet::Segment& code) {
// Branch to the first instruction of the module.
this->BranchToModule(0);
Patcher::Patcher() : c(m_patch_instructions) {
// The first word of the patch section is always a branch to the first instruction of the
// module.
c.dw(0);

// Write save context helper function.
c.l(m_save_context);

@@ -38,6 +34,25 @@ void Patcher::PatchText(const Kernel::PhysicalMemory& program_image,
// Write load context helper function.
c.l(m_load_context);
WriteLoadContext();
}

Patcher::~Patcher() = default;

bool Patcher::PatchText(const Kernel::PhysicalMemory& program_image,
const Kernel::CodeSet::Segment& code) {
// If we have patched modules but cannot reach the new module, then it needs its own patcher.
const size_t image_size = program_image.size();
if (total_program_size + image_size > MaxRelativeBranch && total_program_size > 0) {
return false;
}

// Add a new module patch to our list
modules.emplace_back();
curr_patch = &modules.back();

// The first word of the patch section is always a branch to the first instruction of the
// module.
curr_patch->m_branch_to_module_relocations.push_back({0, 0});

// Retrieve text segment data.
const auto text = std::span{program_image}.subspan(code.offset, code.size);

@@ -94,16 +109,17 @@ void Patcher::PatchText(const Kernel::PhysicalMemory& program_image,
}

if (auto exclusive = Exclusive{inst}; exclusive.Verify()) {
m_exclusives.push_back(i);
curr_patch->m_exclusives.push_back(i);
}
}

// Determine patching mode for the final relocation step
const size_t image_size = program_image.size();
total_program_size += image_size;
this->mode = image_size > MaxRelativeBranch ? PatchMode::PreText : PatchMode::PostData;
return true;
}

void Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
bool Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
const Kernel::CodeSet::Segment& code,
Kernel::PhysicalMemory& program_image,
EntryTrampolines* out_trampolines) {

@@ -120,7 +136,7 @@ void Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
if (mode == PatchMode::PreText) {
rc.B(rel.patch_offset - patch_size - rel.module_offset);
} else {
rc.B(image_size - rel.module_offset + rel.patch_offset);
rc.B(total_program_size - rel.module_offset + rel.patch_offset);
}
};

@@ -129,7 +145,7 @@ void Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
if (mode == PatchMode::PreText) {
rc.B(patch_size - rel.patch_offset + rel.module_offset);
} else {
rc.B(rel.module_offset - image_size - rel.patch_offset);
rc.B(rel.module_offset - total_program_size - rel.patch_offset);
}
};

@@ -137,7 +153,7 @@ void Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
if (mode == PatchMode::PreText) {
return GetInteger(load_base) + patch_offset;
} else {
return GetInteger(load_base) + image_size + patch_offset;
return GetInteger(load_base) + total_program_size + patch_offset;
}
};

@@ -150,32 +166,39 @@ void Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
};

// We are now ready to relocate!
for (const Relocation& rel : m_branch_to_patch_relocations) {
auto& patch = modules[m_relocate_module_index++];
for (const Relocation& rel : patch.m_branch_to_patch_relocations) {
ApplyBranchToPatchRelocation(text_words.data() + rel.module_offset / sizeof(u32), rel);
}
for (const Relocation& rel : m_branch_to_module_relocations) {
for (const Relocation& rel : patch.m_branch_to_module_relocations) {
ApplyBranchToModuleRelocation(m_patch_instructions.data() + rel.patch_offset / sizeof(u32),
rel);
}

// Rewrite PC constants and record post trampolines
for (const Relocation& rel : m_write_module_pc_relocations) {
for (const Relocation& rel : patch.m_write_module_pc_relocations) {
oaknut::CodeGenerator rc{m_patch_instructions.data() + rel.patch_offset / sizeof(u32)};
rc.dx(RebasePc(rel.module_offset));
}
for (const Trampoline& rel : m_trampolines) {
for (const Trampoline& rel : patch.m_trampolines) {
out_trampolines->insert({RebasePc(rel.module_offset), RebasePatch(rel.patch_offset)});
}

// Cortex-A57 seems to treat all exclusives as ordered, but newer processors do not.
// Convert to ordered to preserve this assumption.
for (const ModuleTextAddress i : m_exclusives) {
for (const ModuleTextAddress i : patch.m_exclusives) {
auto exclusive = Exclusive{text_words[i]};
text_words[i] = exclusive.AsOrdered();
}

// Copy to program image
// Remove the patched module size from the total. This is done so total_program_size
// always represents the distance from the currently patched module to the patch section.
total_program_size -= image_size;

// Only copy to the program image of the last module
if (m_relocate_module_index == modules.size()) {
if (this->mode == PatchMode::PreText) {
ASSERT(image_size == total_program_size);
std::memcpy(program_image.data(), m_patch_instructions.data(),
m_patch_instructions.size() * sizeof(u32));
} else {

@@ -183,6 +206,10 @@ void Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
std::memcpy(program_image.data() + image_size, m_patch_instructions.data(),
m_patch_instructions.size() * sizeof(u32));
}
return true;
}

return false;
}

size_t Patcher::GetSectionSize() const noexcept {

@@ -322,7 +349,7 @@ void Patcher::WriteSvcTrampoline(ModuleDestLabel module_dest, u32 svc_id) {

// Write the post-SVC trampoline address, which will jump back to the guest after restoring its
// state.
m_trampolines.push_back({c.offset(), module_dest});
curr_patch->m_trampolines.push_back({c.offset(), module_dest});

// Host called this location. Save the return address so we can
// unwind the stack properly when jumping back.
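With several modules now sharing one patch section, `PatchText` and `RelocateAndCopy` return `bool` so the loader can start a fresh `Patcher` when a new module would fall outside the ±128 MiB relative-branch range. A hedged sketch of that caller-side loop under assumed names (`modules`, `ModuleImage`, and its members are illustrative; the real integration lives in the NCE module loading path, not in this diff); the corresponding declaration changes follow in the next hunks:

```cpp
#include <memory>
#include <vector>

// Sketch of loader-side usage under the new bool-returning Patcher API.
std::vector<std::unique_ptr<Core::NCE::Patcher>> patchers;
patchers.push_back(std::make_unique<Core::NCE::Patcher>());

for (ModuleImage& module : modules) {
    // Try to append the module to the current patch section; if it cannot be
    // reached with a relative branch anymore, give it its own patcher.
    if (!patchers.back()->PatchText(module.program_image, module.code_segment)) {
        patchers.push_back(std::make_unique<Core::NCE::Patcher>());
        patchers.back()->PatchText(module.program_image, module.code_segment);
    }
}
```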
@@ -31,9 +31,9 @@ public:
explicit Patcher();
~Patcher();

void PatchText(const Kernel::PhysicalMemory& program_image,
bool PatchText(const Kernel::PhysicalMemory& program_image,
const Kernel::CodeSet::Segment& code);
void RelocateAndCopy(Common::ProcessAddress load_base, const Kernel::CodeSet::Segment& code,
bool RelocateAndCopy(Common::ProcessAddress load_base, const Kernel::CodeSet::Segment& code,
Kernel::PhysicalMemory& program_image, EntryTrampolines* out_trampolines);
size_t GetSectionSize() const noexcept;

@@ -61,16 +61,16 @@ private:

private:
void BranchToPatch(uintptr_t module_dest) {
m_branch_to_patch_relocations.push_back({c.offset(), module_dest});
curr_patch->m_branch_to_patch_relocations.push_back({c.offset(), module_dest});
}

void BranchToModule(uintptr_t module_dest) {
m_branch_to_module_relocations.push_back({c.offset(), module_dest});
curr_patch->m_branch_to_module_relocations.push_back({c.offset(), module_dest});
c.dw(0);
}

void WriteModulePc(uintptr_t module_dest) {
m_write_module_pc_relocations.push_back({c.offset(), module_dest});
curr_patch->m_write_module_pc_relocations.push_back({c.offset(), module_dest});
c.dx(0);
}

@@ -84,15 +84,22 @@ private:
uintptr_t module_offset; ///< Offset in bytes from the start of the text section.
};

oaknut::VectorCodeGenerator c;
struct ModulePatch {
std::vector<Trampoline> m_trampolines;
std::vector<Relocation> m_branch_to_patch_relocations{};
std::vector<Relocation> m_branch_to_module_relocations{};
std::vector<Relocation> m_write_module_pc_relocations{};
std::vector<ModuleTextAddress> m_exclusives{};
};

oaknut::VectorCodeGenerator c;
oaknut::Label m_save_context{};
oaknut::Label m_load_context{};
PatchMode mode{PatchMode::None};
size_t total_program_size{};
size_t m_relocate_module_index{};
std::vector<ModulePatch> modules;
ModulePatch* curr_patch;
};

} // namespace Core::NCE
@@ -28,6 +28,7 @@
#include "core/file_sys/savedata_factory.h"
#include "core/file_sys/vfs_concat.h"
#include "core/file_sys/vfs_real.h"
#include "core/gpu_dirty_memory_manager.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"

@@ -413,6 +414,7 @@ struct System::Impl {
kernel.ShutdownCores();
services.reset();
service_manager.reset();
fs_controller.Reset();
cheat_engine.reset();
telemetry_session.reset();
time_manager.Shutdown();

@@ -564,6 +566,9 @@ struct System::Impl {
std::array<u64, Core::Hardware::NUM_CPU_CORES> dynarmic_ticks{};
std::array<MicroProfileToken, Core::Hardware::NUM_CPU_CORES> microprofile_cpu{};

std::array<Core::GPUDirtyMemoryManager, Core::Hardware::NUM_CPU_CORES>
gpu_dirty_memory_managers;

std::deque<std::vector<u8>> user_channel;
};

@@ -650,8 +655,14 @@ size_t System::GetCurrentHostThreadID() const {
return impl->kernel.GetCurrentHostThreadID();
}

void System::GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback) {
return this->ApplicationProcess()->GatherGPUDirtyMemory(callback);
std::span<GPUDirtyMemoryManager> System::GetGPUDirtyMemoryManager() {
return impl->gpu_dirty_memory_managers;
}

void System::GatherGPUDirtyMemory(std::function<void(PAddr, size_t)>& callback) {
for (auto& manager : impl->gpu_dirty_memory_managers) {
manager.Gather(callback);
}
}

PerfStatsResults System::GetAndResetPerfStats() {
@@ -8,6 +8,7 @@
#include <functional>
#include <memory>
#include <mutex>
#include <span>
#include <string>
#include <vector>

@@ -116,6 +117,7 @@ class CpuManager;
class Debugger;
class DeviceMemory;
class ExclusiveMonitor;
class GPUDirtyMemoryManager;
class PerfStats;
class Reporter;
class SpeedLimiter;

@@ -224,7 +226,9 @@ public:
/// Prepare the core emulation for a reschedule
void PrepareReschedule(u32 core_index);

void GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback);
std::span<GPUDirtyMemoryManager> GetGPUDirtyMemoryManager();

void GatherGPUDirtyMemory(std::function<void(PAddr, size_t)>& callback);

[[nodiscard]] size_t GetCurrentHostThreadID() const;
@ -114,7 +114,7 @@ public:
|
||||
}
|
||||
|
||||
Kernel::KThread* GetActiveThread() override {
|
||||
return state->active_thread;
|
||||
return state->active_thread.GetPointerUnsafe();
|
||||
}
|
||||
|
||||
private:
|
||||
@ -147,11 +147,14 @@ private:
|
||||
|
||||
std::scoped_lock lk{connection_lock};
|
||||
|
||||
// Find the process we are going to debug.
|
||||
SetDebugProcess();
|
||||
|
||||
// Ensure everything is stopped.
|
||||
PauseEmulation();
|
||||
|
||||
// Set up the new frontend.
|
||||
frontend = std::make_unique<GDBStub>(*this, system);
|
||||
frontend = std::make_unique<GDBStub>(*this, system, debug_process.GetPointerUnsafe());
|
||||
|
||||
// Set the new state. This will tear down any existing state.
|
||||
state = ConnectionState{
|
||||
@ -194,15 +197,20 @@ private:
|
||||
UpdateActiveThread();
|
||||
|
||||
if (state->info.type == SignalType::Watchpoint) {
|
||||
frontend->Watchpoint(state->active_thread, *state->info.watchpoint);
|
||||
frontend->Watchpoint(std::addressof(*state->active_thread),
|
||||
*state->info.watchpoint);
|
||||
} else {
|
||||
frontend->Stopped(state->active_thread);
|
||||
frontend->Stopped(std::addressof(*state->active_thread));
|
||||
}
|
||||
|
||||
break;
|
||||
case SignalType::ShuttingDown:
|
||||
frontend->ShuttingDown();
|
||||
|
||||
// Release members.
|
||||
state->active_thread.Reset(nullptr);
|
||||
debug_process.Reset(nullptr);
|
||||
|
||||
// Wait for emulation to shut down gracefully now.
|
||||
state->signal_pipe.close();
|
||||
state->client_socket.shutdown(boost::asio::socket_base::shutdown_both);
|
||||
@ -222,7 +230,7 @@ private:
|
||||
stopped = true;
|
||||
PauseEmulation();
|
||||
UpdateActiveThread();
|
||||
frontend->Stopped(state->active_thread);
|
||||
frontend->Stopped(state->active_thread.GetPointerUnsafe());
|
||||
break;
|
||||
}
|
||||
case DebuggerAction::Continue:
|
||||
@ -232,7 +240,7 @@ private:
|
||||
MarkResumed([&] {
|
||||
state->active_thread->SetStepState(Kernel::StepState::StepPending);
|
||||
state->active_thread->Resume(Kernel::SuspendType::Debug);
|
||||
ResumeEmulation(state->active_thread);
|
||||
ResumeEmulation(state->active_thread.GetPointerUnsafe());
|
||||
});
|
||||
break;
|
||||
case DebuggerAction::StepThreadLocked: {
|
||||
@ -255,6 +263,7 @@ private:
|
||||
}
|
||||
|
||||
void PauseEmulation() {
|
||||
Kernel::KScopedLightLock ll{debug_process->GetListLock()};
|
||||
Kernel::KScopedSchedulerLock sl{system.Kernel()};
|
||||
|
||||
// Put all threads to sleep on next scheduler round.
|
||||
@ -264,6 +273,9 @@ private:
|
||||
}
|
||||
|
||||
void ResumeEmulation(Kernel::KThread* except = nullptr) {
|
||||
Kernel::KScopedLightLock ll{debug_process->GetListLock()};
|
||||
Kernel::KScopedSchedulerLock sl{system.Kernel()};
|
||||
|
||||
// Wake up all threads.
|
||||
for (auto& thread : ThreadList()) {
|
||||
if (std::addressof(thread) == except) {
|
||||
@ -277,15 +289,16 @@ private:
|
||||
|
||||
template <typename Callback>
|
||||
void MarkResumed(Callback&& cb) {
|
||||
Kernel::KScopedSchedulerLock sl{system.Kernel()};
|
||||
stopped = false;
|
||||
cb();
|
||||
}
|
||||
|
||||
void UpdateActiveThread() {
|
||||
Kernel::KScopedLightLock ll{debug_process->GetListLock()};
|
||||
|
||||
auto& threads{ThreadList()};
|
||||
for (auto& thread : threads) {
|
||||
if (std::addressof(thread) == state->active_thread) {
|
||||
if (std::addressof(thread) == state->active_thread.GetPointerUnsafe()) {
|
||||
// Thread is still alive, no need to update.
|
||||
return;
|
||||
}
|
||||
@ -293,12 +306,18 @@ private:
|
||||
state->active_thread = std::addressof(threads.front());
|
||||
}
|
||||
|
||||
private:
|
||||
void SetDebugProcess() {
|
||||
debug_process = std::move(system.Kernel().GetProcessList().back());
|
||||
}
|
||||
|
||||
Kernel::KProcess::ThreadList& ThreadList() {
|
||||
return system.ApplicationProcess()->GetThreadList();
|
||||
return debug_process->GetThreadList();
|
||||
}
|
||||
|
||||
private:
|
||||
System& system;
|
||||
Kernel::KScopedAutoObject<Kernel::KProcess> debug_process;
|
||||
std::unique_ptr<DebuggerFrontend> frontend;
|
||||
|
||||
boost::asio::io_context io_context;
|
||||
@ -310,7 +329,7 @@ private:
|
||||
boost::process::async_pipe signal_pipe;
|
||||
|
||||
SignalInfo info;
|
||||
Kernel::KThread* active_thread;
|
||||
Kernel::KScopedAutoObject<Kernel::KThread> active_thread;
|
||||
std::array<u8, 4096> client_data;
|
||||
bool pipe_data;
|
||||
};
|
||||
|
@ -108,9 +108,9 @@ static std::string EscapeXML(std::string_view data) {
|
||||
return escaped;
|
||||
}
|
||||
|
||||
GDBStub::GDBStub(DebuggerBackend& backend_, Core::System& system_)
|
||||
: DebuggerFrontend(backend_), system{system_} {
|
||||
if (system.ApplicationProcess()->Is64Bit()) {
|
||||
GDBStub::GDBStub(DebuggerBackend& backend_, Core::System& system_, Kernel::KProcess* debug_process_)
|
||||
: DebuggerFrontend(backend_), system{system_}, debug_process{debug_process_} {
|
||||
if (GetProcess()->Is64Bit()) {
|
||||
arch = std::make_unique<GDBStubA64>();
|
||||
} else {
|
||||
arch = std::make_unique<GDBStubA32>();
|
||||
@ -276,7 +276,7 @@ void GDBStub::ExecuteCommand(std::string_view packet, std::vector<DebuggerAction
|
||||
const size_t size{static_cast<size_t>(strtoll(command.data() + sep, nullptr, 16))};
|
||||
|
||||
std::vector<u8> mem(size);
|
||||
if (system.ApplicationMemory().ReadBlock(addr, mem.data(), size)) {
|
||||
if (GetMemory().ReadBlock(addr, mem.data(), size)) {
|
||||
// Restore any bytes belonging to replaced instructions.
|
||||
auto it = replaced_instructions.lower_bound(addr);
|
||||
for (; it != replaced_instructions.end() && it->first < addr + size; it++) {
|
||||
@ -310,8 +310,8 @@ void GDBStub::ExecuteCommand(std::string_view packet, std::vector<DebuggerAction
|
||||
const auto mem_substr{std::string_view(command).substr(mem_sep)};
|
||||
const auto mem{Common::HexStringToVector(mem_substr, false)};
|
||||
|
||||
if (system.ApplicationMemory().WriteBlock(addr, mem.data(), size)) {
|
||||
Core::InvalidateInstructionCacheRange(system.ApplicationProcess(), addr, size);
|
||||
if (GetMemory().WriteBlock(addr, mem.data(), size)) {
|
||||
Core::InvalidateInstructionCacheRange(GetProcess(), addr, size);
|
||||
SendReply(GDB_STUB_REPLY_OK);
|
||||
} else {
|
||||
SendReply(GDB_STUB_REPLY_ERR);
|
||||
@ -353,7 +353,7 @@ void GDBStub::HandleBreakpointInsert(std::string_view command) {
|
||||
const size_t addr{static_cast<size_t>(strtoll(command.data() + addr_sep, nullptr, 16))};
|
||||
const size_t size{static_cast<size_t>(strtoll(command.data() + size_sep, nullptr, 16))};
|
||||
|
||||
if (!system.ApplicationMemory().IsValidVirtualAddressRange(addr, size)) {
|
||||
if (!GetMemory().IsValidVirtualAddressRange(addr, size)) {
|
||||
SendReply(GDB_STUB_REPLY_ERR);
|
||||
return;
|
||||
}
|
||||
@ -362,22 +362,20 @@ void GDBStub::HandleBreakpointInsert(std::string_view command) {
|
||||
|
||||
switch (type) {
|
||||
case BreakpointType::Software:
|
||||
replaced_instructions[addr] = system.ApplicationMemory().Read32(addr);
|
||||
system.ApplicationMemory().Write32(addr, arch->BreakpointInstruction());
|
||||
Core::InvalidateInstructionCacheRange(system.ApplicationProcess(), addr, sizeof(u32));
|
||||
replaced_instructions[addr] = GetMemory().Read32(addr);
|
||||
GetMemory().Write32(addr, arch->BreakpointInstruction());
|
||||
Core::InvalidateInstructionCacheRange(GetProcess(), addr, sizeof(u32));
|
||||
success = true;
|
||||
break;
|
||||
case BreakpointType::WriteWatch:
|
||||
success = system.ApplicationProcess()->InsertWatchpoint(addr, size,
|
||||
Kernel::DebugWatchpointType::Write);
|
||||
success = GetProcess()->InsertWatchpoint(addr, size, Kernel::DebugWatchpointType::Write);
|
||||
break;
|
||||
case BreakpointType::ReadWatch:
|
||||
success = system.ApplicationProcess()->InsertWatchpoint(addr, size,
|
||||
Kernel::DebugWatchpointType::Read);
|
||||
success = GetProcess()->InsertWatchpoint(addr, size, Kernel::DebugWatchpointType::Read);
|
||||
break;
|
||||
case BreakpointType::AccessWatch:
|
||||
success = system.ApplicationProcess()->InsertWatchpoint(
|
||||
addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
|
||||
success =
|
||||
GetProcess()->InsertWatchpoint(addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
|
||||
break;
|
||||
case BreakpointType::Hardware:
|
||||
default:
|
||||
@ -400,7 +398,7 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
|
||||
const size_t addr{static_cast<size_t>(strtoll(command.data() + addr_sep, nullptr, 16))};
|
||||
const size_t size{static_cast<size_t>(strtoll(command.data() + size_sep, nullptr, 16))};
|
||||
|
||||
if (!system.ApplicationMemory().IsValidVirtualAddressRange(addr, size)) {
|
||||
if (!GetMemory().IsValidVirtualAddressRange(addr, size)) {
|
||||
SendReply(GDB_STUB_REPLY_ERR);
|
||||
return;
|
||||
}
|
||||
@ -411,24 +409,22 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
|
||||
case BreakpointType::Software: {
|
||||
const auto orig_insn{replaced_instructions.find(addr)};
|
||||
if (orig_insn != replaced_instructions.end()) {
|
||||
system.ApplicationMemory().Write32(addr, orig_insn->second);
|
||||
Core::InvalidateInstructionCacheRange(system.ApplicationProcess(), addr, sizeof(u32));
|
||||
GetMemory().Write32(addr, orig_insn->second);
|
||||
Core::InvalidateInstructionCacheRange(GetProcess(), addr, sizeof(u32));
|
||||
replaced_instructions.erase(addr);
|
||||
success = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BreakpointType::WriteWatch:
|
||||
success = system.ApplicationProcess()->RemoveWatchpoint(addr, size,
|
||||
Kernel::DebugWatchpointType::Write);
|
||||
success = GetProcess()->RemoveWatchpoint(addr, size, Kernel::DebugWatchpointType::Write);
|
||||
break;
|
||||
case BreakpointType::ReadWatch:
|
||||
success = system.ApplicationProcess()->RemoveWatchpoint(addr, size,
|
||||
Kernel::DebugWatchpointType::Read);
|
||||
success = GetProcess()->RemoveWatchpoint(addr, size, Kernel::DebugWatchpointType::Read);
|
||||
break;
|
||||
case BreakpointType::AccessWatch:
|
||||
success = system.ApplicationProcess()->RemoveWatchpoint(
|
||||
addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
|
||||
success =
|
||||
GetProcess()->RemoveWatchpoint(addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
|
||||
break;
|
||||
case BreakpointType::Hardware:
|
||||
default:
|
||||
@ -466,10 +462,10 @@ void GDBStub::HandleQuery(std::string_view command) {
|
||||
const auto target_xml{arch->GetTargetXML()};
|
||||
SendReply(PaginateBuffer(target_xml, command.substr(30)));
|
||||
} else if (command.starts_with("Offsets")) {
|
||||
const auto main_offset = Core::FindMainModuleEntrypoint(system.ApplicationProcess());
|
||||
const auto main_offset = Core::FindMainModuleEntrypoint(GetProcess());
|
||||
SendReply(fmt::format("TextSeg={:x}", GetInteger(main_offset)));
|
||||
} else if (command.starts_with("Xfer:libraries:read::")) {
|
||||
auto modules = Core::FindModules(system.ApplicationProcess());
|
||||
auto modules = Core::FindModules(GetProcess());
|
||||
|
||||
std::string buffer;
|
||||
buffer += R"(<?xml version="1.0"?>)";
|
||||
@ -483,7 +479,7 @@ void GDBStub::HandleQuery(std::string_view command) {
|
||||
SendReply(PaginateBuffer(buffer, command.substr(21)));
|
||||
} else if (command.starts_with("fThreadInfo")) {
|
||||
// beginning of list
|
||||
const auto& threads = system.ApplicationProcess()->GetThreadList();
|
||||
const auto& threads = GetProcess()->GetThreadList();
|
||||
std::vector<std::string> thread_ids;
|
||||
for (const auto& thread : threads) {
|
||||
thread_ids.push_back(fmt::format("{:x}", thread.GetThreadId()));
|
||||
@ -497,7 +493,7 @@ void GDBStub::HandleQuery(std::string_view command) {
|
||||
buffer += R"(<?xml version="1.0"?>)";
|
||||
buffer += "<threads>";
|
||||
|
||||
const auto& threads = system.ApplicationProcess()->GetThreadList();
|
||||
const auto& threads = GetProcess()->GetThreadList();
|
||||
for (const auto& thread : threads) {
|
||||
auto thread_name{Core::GetThreadName(&thread)};
|
||||
if (!thread_name) {
|
||||
@ -613,7 +609,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
|
||||
std::string_view command_str{reinterpret_cast<const char*>(&command[0]), command.size()};
|
||||
std::string reply;
|
||||
|
||||
auto* process = system.ApplicationProcess();
|
||||
auto* process = GetProcess();
|
||||
auto& page_table = process->GetPageTable();
|
||||
|
||||
const char* commands = "Commands:\n"
|
||||
@ -714,7 +710,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
|
||||
}
|
||||
|
||||
Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) {
|
||||
auto& threads{system.ApplicationProcess()->GetThreadList()};
|
||||
auto& threads{GetProcess()->GetThreadList()};
|
||||
for (auto& thread : threads) {
|
||||
if (thread.GetThreadId() == thread_id) {
|
||||
return std::addressof(thread);
|
||||
@ -783,4 +779,12 @@ void GDBStub::SendStatus(char status) {
|
||||
backend.WriteToClient(buf);
|
||||
}
|
||||
|
||||
Kernel::KProcess* GDBStub::GetProcess() {
|
||||
return debug_process;
|
||||
}
|
||||
|
||||
Core::Memory::Memory& GDBStub::GetMemory() {
|
||||
return GetProcess()->GetMemory();
|
||||
}
|
||||
|
||||
} // namespace Core
|
||||
|
@ -12,13 +12,22 @@
|
||||
#include "core/debugger/debugger_interface.h"
|
||||
#include "core/debugger/gdbstub_arch.h"
|
||||
|
||||
namespace Kernel {
|
||||
class KProcess;
|
||||
}
|
||||
|
||||
namespace Core::Memory {
|
||||
class Memory;
|
||||
}
|
||||
|
||||
namespace Core {
|
||||
|
||||
class System;
|
||||
|
||||
class GDBStub : public DebuggerFrontend {
|
||||
public:
|
||||
explicit GDBStub(DebuggerBackend& backend, Core::System& system);
|
||||
explicit GDBStub(DebuggerBackend& backend, Core::System& system,
|
||||
Kernel::KProcess* debug_process);
|
||||
~GDBStub() override;
|
||||
|
||||
void Connected() override;
|
||||
@ -42,8 +51,12 @@ private:
|
||||
void SendReply(std::string_view data);
|
||||
void SendStatus(char status);
|
||||
|
||||
Kernel::KProcess* GetProcess();
|
||||
Core::Memory::Memory& GetMemory();
|
||||
|
||||
private:
|
||||
Core::System& system;
|
||||
Kernel::KProcess* debug_process;
|
||||
std::unique_ptr<GDBStubArch> arch;
|
||||
std::vector<char> current_command;
|
||||
std::map<VAddr, u32> replaced_instructions;
|
||||
|
@@ -31,6 +31,12 @@ public:
DramMemoryMap::Base;
}

template <typename T>
PAddr GetRawPhysicalAddr(const T* ptr) const {
return static_cast<PAddr>(reinterpret_cast<uintptr_t>(ptr) -
reinterpret_cast<uintptr_t>(buffer.BackingBasePointer()));
}

template <typename T>
T* GetPointer(Common::PhysicalAddress addr) {
return reinterpret_cast<T*>(buffer.BackingBasePointer() +

@@ -43,6 +49,16 @@ public:
(GetInteger(addr) - DramMemoryMap::Base));
}

template <typename T>
T* GetPointerFromRaw(PAddr addr) {
return reinterpret_cast<T*>(buffer.BackingBasePointer() + addr);
}

template <typename T>
const T* GetPointerFromRaw(PAddr addr) const {
return reinterpret_cast<T*>(buffer.BackingBasePointer() + addr);
}

Common::HostMemory buffer;
};
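The new DeviceMemory helpers convert between host pointers into the DRAM backing buffer and "raw" physical offsets, which are measured from the backing base rather than from DramMemoryMap::Base; the device memory manager below builds on exactly this raw form. A small round-trip sketch, where `device_memory` and `paddr` are illustrative names:

```cpp
// Round-trip sketch: GetPointerFromRaw() inverts GetRawPhysicalAddr(), since a
// raw address is a plain byte offset from the backing base pointer.
u8* const host_ptr = device_memory.GetPointer<u8>(paddr);
const PAddr raw = device_memory.GetRawPhysicalAddr(host_ptr);
u8* const same_ptr = device_memory.GetPointerFromRaw<u8>(raw);
ASSERT(same_ptr == host_ptr);
```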
src/core/device_memory_manager.h (new file, 208 lines)
@@ -0,0 +1,208 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <atomic>
|
||||
#include <deque>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "common/scratch_buffer.h"
|
||||
#include "common/virtual_buffer.h"
|
||||
|
||||
namespace Core {
|
||||
|
||||
constexpr size_t DEVICE_PAGEBITS = 12ULL;
|
||||
constexpr size_t DEVICE_PAGESIZE = 1ULL << DEVICE_PAGEBITS;
|
||||
constexpr size_t DEVICE_PAGEMASK = DEVICE_PAGESIZE - 1ULL;
|
||||
|
||||
class DeviceMemory;
|
||||
|
||||
namespace Memory {
|
||||
class Memory;
|
||||
}
|
||||
|
||||
template <typename DTraits>
|
||||
struct DeviceMemoryManagerAllocator;
|
||||
|
||||
template <typename Traits>
|
||||
class DeviceMemoryManager {
|
||||
using DeviceInterface = typename Traits::DeviceInterface;
|
||||
using DeviceMethods = typename Traits::DeviceMethods;
|
||||
|
||||
public:
|
||||
DeviceMemoryManager(const DeviceMemory& device_memory);
|
||||
~DeviceMemoryManager();
|
||||
|
||||
void BindInterface(DeviceInterface* device_inter);
|
||||
|
||||
DAddr Allocate(size_t size);
|
||||
void AllocateFixed(DAddr start, size_t size);
|
||||
void Free(DAddr start, size_t size);
|
||||
|
||||
void Map(DAddr address, VAddr virtual_address, size_t size, size_t process_id,
|
||||
bool track = false);
|
||||
|
||||
void Unmap(DAddr address, size_t size);
|
||||
|
||||
void TrackContinuityImpl(DAddr address, VAddr virtual_address, size_t size, size_t process_id);
|
||||
void TrackContinuity(DAddr address, VAddr virtual_address, size_t size, size_t process_id) {
|
||||
std::scoped_lock lk(mapping_guard);
|
||||
TrackContinuityImpl(address, virtual_address, size, process_id);
|
||||
}
|
||||
|
||||
// Write / Read
|
||||
template <typename T>
|
||||
T* GetPointer(DAddr address);
|
||||
|
||||
template <typename T>
|
||||
const T* GetPointer(DAddr address) const;
|
||||
|
||||
template <typename Func>
|
||||
void ApplyOpOnPAddr(PAddr address, Common::ScratchBuffer<u32>& buffer, Func&& operation) {
|
||||
DAddr subbits = static_cast<DAddr>(address & page_mask);
|
||||
const u32 base = compressed_device_addr[(address >> page_bits)];
|
||||
if ((base >> MULTI_FLAG_BITS) == 0) [[likely]] {
|
||||
const DAddr d_address = (static_cast<DAddr>(base) << page_bits) + subbits;
|
||||
operation(d_address);
|
||||
return;
|
||||
}
|
||||
InnerGatherDeviceAddresses(buffer, address);
|
||||
for (u32 value : buffer) {
|
||||
operation((static_cast<DAddr>(value) << page_bits) + subbits);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename Func>
|
||||
void ApplyOpOnPointer(const u8* p, Common::ScratchBuffer<u32>& buffer, Func&& operation) {
|
||||
PAddr address = GetRawPhysicalAddr<u8>(p);
|
||||
ApplyOpOnPAddr(address, buffer, operation);
|
||||
}
|
||||
|
||||
PAddr GetPhysicalRawAddressFromDAddr(DAddr address) const {
|
||||
PAddr subbits = static_cast<PAddr>(address & page_mask);
|
||||
auto paddr = compressed_physical_ptr[(address >> page_bits)];
|
||||
if (paddr == 0) {
|
||||
return 0;
|
||||
}
|
||||
return (static_cast<PAddr>(paddr - 1) << page_bits) + subbits;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void Write(DAddr address, T value);
|
||||
|
||||
template <typename T>
|
||||
T Read(DAddr address) const;
|
||||
|
||||
u8* GetSpan(const DAddr src_addr, const std::size_t size);
|
||||
const u8* GetSpan(const DAddr src_addr, const std::size_t size) const;
|
||||
|
||||
void ReadBlock(DAddr address, void* dest_pointer, size_t size);
|
||||
void ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size);
|
||||
void WriteBlock(DAddr address, const void* src_pointer, size_t size);
|
||||
void WriteBlockUnsafe(DAddr address, const void* src_pointer, size_t size);
|
||||
|
||||
size_t RegisterProcess(Memory::Memory* memory);
|
||||
void UnregisterProcess(size_t id);
|
||||
|
||||
void UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta);
|
||||
|
||||
static constexpr size_t AS_BITS = Traits::device_virtual_bits;
|
||||
|
||||
private:
|
||||
static constexpr size_t device_virtual_bits = Traits::device_virtual_bits;
|
||||
static constexpr size_t device_as_size = 1ULL << device_virtual_bits;
|
||||
static constexpr size_t physical_min_bits = 32;
|
||||
static constexpr size_t physical_max_bits = 33;
|
||||
static constexpr size_t page_bits = 12;
|
||||
static constexpr size_t page_size = 1ULL << page_bits;
|
||||
static constexpr size_t page_mask = page_size - 1ULL;
|
||||
static constexpr u32 physical_address_base = 1U << page_bits;
|
||||
static constexpr u32 MULTI_FLAG_BITS = 31;
|
||||
static constexpr u32 MULTI_FLAG = 1U << MULTI_FLAG_BITS;
|
||||
static constexpr u32 MULTI_MASK = ~MULTI_FLAG;
|
||||
|
||||
template <typename T>
|
||||
T* GetPointerFromRaw(PAddr addr) {
|
||||
return reinterpret_cast<T*>(physical_base + addr);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
const T* GetPointerFromRaw(PAddr addr) const {
|
||||
return reinterpret_cast<T*>(physical_base + addr);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
PAddr GetRawPhysicalAddr(const T* ptr) const {
|
||||
return static_cast<PAddr>(reinterpret_cast<uintptr_t>(ptr) - physical_base);
|
||||
}
|
||||
|
||||
void WalkBlock(const DAddr addr, const std::size_t size, auto on_unmapped, auto on_memory,
|
||||
auto increment);
|
||||
|
||||
void InnerGatherDeviceAddresses(Common::ScratchBuffer<u32>& buffer, PAddr address);
|
||||
|
||||
std::unique_ptr<DeviceMemoryManagerAllocator<Traits>> impl;
|
||||
|
||||
const uintptr_t physical_base;
|
||||
DeviceInterface* device_inter;
|
||||
Common::VirtualBuffer<u32> compressed_physical_ptr;
|
||||
Common::VirtualBuffer<u32> compressed_device_addr;
|
||||
Common::VirtualBuffer<u32> continuity_tracker;
|
||||
|
||||
// Process memory interfaces
|
||||
|
||||
std::deque<size_t> id_pool;
|
||||
std::deque<Memory::Memory*> registered_processes;
|
||||
|
||||
// Memory protection management
|
||||
|
||||
static constexpr size_t guest_max_as_bits = 39;
|
||||
static constexpr size_t guest_as_size = 1ULL << guest_max_as_bits;
|
||||
static constexpr size_t guest_mask = guest_as_size - 1ULL;
|
||||
static constexpr size_t process_id_start_bit = guest_max_as_bits;
|
||||
|
||||
std::pair<size_t, VAddr> ExtractCPUBacking(size_t page_index) {
|
||||
auto content = cpu_backing_address[page_index];
|
||||
const VAddr address = content & guest_mask;
|
||||
const size_t process_id = static_cast<size_t>(content >> process_id_start_bit);
|
||||
return std::make_pair(process_id, address);
|
||||
}
|
||||
|
||||
void InsertCPUBacking(size_t page_index, VAddr address, size_t process_id) {
|
||||
cpu_backing_address[page_index] = address | (process_id << process_id_start_bit);
|
||||
}
|
||||
|
||||
Common::VirtualBuffer<VAddr> cpu_backing_address;
|
||||
static constexpr size_t subentries = 8 / sizeof(u8);
|
||||
static constexpr size_t subentries_mask = subentries - 1;
|
||||
class CounterEntry final {
|
||||
public:
|
||||
CounterEntry() = default;
|
||||
|
||||
std::atomic_uint8_t& Count(std::size_t page) {
|
||||
return values[page & subentries_mask];
|
||||
}
|
||||
|
||||
const std::atomic_uint8_t& Count(std::size_t page) const {
|
||||
return values[page & subentries_mask];
|
||||
}
|
||||
|
||||
private:
|
||||
std::array<std::atomic_uint8_t, subentries> values{};
|
||||
};
|
||||
static_assert(sizeof(CounterEntry) == subentries * sizeof(u8),
|
||||
"CounterEntry should be 8 bytes!");
|
||||
|
||||
static constexpr size_t num_counter_entries =
|
||||
(1ULL << (device_virtual_bits - page_bits)) / subentries;
|
||||
using CachedPages = std::array<CounterEntry, num_counter_entries>;
|
||||
std::unique_ptr<CachedPages> cached_pages;
|
||||
std::mutex counter_guard;
|
||||
std::mutex mapping_guard;
|
||||
};
|
||||
|
||||
} // namespace Core
|
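Taken together, the DeviceMemoryManager declared above gives services an SMMU-style device address space that is decoupled from any single process: guest pages are registered per process, mapped into DAddr space, and accessed through the manager instead of through System::ApplicationMemory(). A usage sketch under assumed names (`Traits`, `device_memory`, `process_memory`, `guest_vaddr`, and `map_size` are placeholders; the concrete traits type is defined outside this diff):

```cpp
// Usage sketch of the new device memory manager; all lowercase names are placeholders.
Core::DeviceMemoryManager<Traits> manager{device_memory};

// Each guest process that can back device memory is registered once.
const size_t process_id = manager.RegisterProcess(&process_memory);

// Carve out a device-address range and back it with the process' pages.
const DAddr daddr = manager.Allocate(map_size);
manager.Map(daddr, guest_vaddr, map_size, process_id);

// Device users (GPU, nvdrv) now address this memory through DAddr.
manager.WriteBlock(daddr, src.data(), src.size());
manager.ReadBlock(daddr, dst.data(), dst.size());

// Teardown mirrors setup.
manager.Unmap(daddr, map_size);
manager.Free(daddr, map_size);
manager.UnregisterProcess(process_id);
```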
src/core/device_memory_manager.inc (new file, 588 lines)
@@ -0,0 +1,588 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <atomic>
|
||||
#include <limits>
|
||||
#include <memory>
|
||||
#include <type_traits>
|
||||
|
||||
#include "common/address_space.h"
|
||||
#include "common/address_space.inc"
|
||||
#include "common/alignment.h"
|
||||
#include "common/assert.h"
|
||||
#include "common/div_ceil.h"
|
||||
#include "common/scope_exit.h"
|
||||
#include "common/settings.h"
|
||||
#include "core/device_memory.h"
|
||||
#include "core/device_memory_manager.h"
|
||||
#include "core/memory.h"
|
||||
|
||||
namespace Core {
|
||||
|
||||
namespace {
|
||||
|
||||
class MultiAddressContainer {
|
||||
public:
|
||||
MultiAddressContainer() = default;
|
||||
~MultiAddressContainer() = default;
|
||||
|
||||
void GatherValues(u32 start_entry, Common::ScratchBuffer<u32>& buffer) {
|
||||
buffer.resize(8);
|
||||
buffer.resize(0);
|
||||
size_t index = 0;
|
||||
const auto add_value = [&](u32 value) {
|
||||
buffer[index] = value;
|
||||
index++;
|
||||
buffer.resize(index);
|
||||
};
|
||||
|
||||
u32 iter_entry = start_entry;
|
||||
Entry* current = &storage[iter_entry - 1];
|
||||
add_value(current->value);
|
||||
while (current->next_entry != 0) {
|
||||
iter_entry = current->next_entry;
|
||||
current = &storage[iter_entry - 1];
|
||||
add_value(current->value);
|
||||
}
|
||||
}
|
||||
|
||||
u32 Register(u32 value) {
|
||||
return RegisterImplementation(value);
|
||||
}
|
||||
|
||||
void Register(u32 value, u32 start_entry) {
|
||||
auto entry_id = RegisterImplementation(value);
|
||||
u32 iter_entry = start_entry;
|
||||
Entry* current = &storage[iter_entry - 1];
|
||||
while (current->next_entry != 0) {
|
||||
iter_entry = current->next_entry;
|
||||
current = &storage[iter_entry - 1];
|
||||
}
|
||||
current->next_entry = entry_id;
|
||||
}
|
||||
|
||||
std::pair<bool, u32> Unregister(u32 value, u32 start_entry) {
|
||||
u32 iter_entry = start_entry;
|
||||
Entry* previous{};
|
||||
Entry* current = &storage[iter_entry - 1];
|
||||
Entry* next{};
|
||||
bool more_than_one_remaining = false;
|
||||
u32 result_start{start_entry};
|
||||
size_t count = 0;
|
||||
while (current->value != value) {
|
||||
count++;
|
||||
previous = current;
|
||||
iter_entry = current->next_entry;
|
||||
current = &storage[iter_entry - 1];
|
||||
}
|
||||
// Find next
|
||||
u32 next_entry = current->next_entry;
|
||||
if (next_entry != 0) {
|
||||
next = &storage[next_entry - 1];
|
||||
more_than_one_remaining = next->next_entry != 0 || previous != nullptr;
|
||||
}
|
||||
if (previous) {
|
||||
previous->next_entry = next_entry;
|
||||
} else {
|
||||
result_start = next_entry;
|
||||
}
|
||||
free_entries.emplace_back(iter_entry);
|
||||
return std::make_pair(more_than_one_remaining || count > 1, result_start);
|
||||
}
|
||||
|
||||
u32 ReleaseEntry(u32 start_entry) {
|
||||
Entry* current = &storage[start_entry - 1];
|
||||
free_entries.emplace_back(start_entry);
|
||||
return current->value;
|
||||
}
|
||||
|
||||
private:
|
||||
u32 RegisterImplementation(u32 value) {
|
||||
auto entry_id = GetNewEntry();
|
||||
auto& entry = storage[entry_id - 1];
|
||||
entry.next_entry = 0;
|
||||
entry.value = value;
|
||||
return entry_id;
|
||||
}
|
||||
u32 GetNewEntry() {
|
||||
if (!free_entries.empty()) {
|
||||
u32 result = free_entries.front();
|
||||
free_entries.pop_front();
|
||||
return result;
|
||||
}
|
||||
storage.emplace_back();
|
||||
u32 new_entry = static_cast<u32>(storage.size());
|
||||
return new_entry;
|
||||
}
|
||||
|
||||
struct Entry {
|
||||
u32 next_entry{};
|
||||
u32 value{};
|
||||
};
|
||||
|
||||
std::deque<Entry> storage;
|
||||
std::deque<u32> free_entries;
|
||||
};
|
||||
|
||||
struct EmptyAllocator {
|
||||
EmptyAllocator([[maybe_unused]] DAddr address) {}
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
template <typename DTraits>
|
||||
struct DeviceMemoryManagerAllocator {
|
||||
static constexpr size_t device_virtual_bits = DTraits::device_virtual_bits;
|
||||
static constexpr DAddr first_address = 1ULL << Memory::YUZU_PAGEBITS;
|
||||
static constexpr DAddr max_device_area = 1ULL << device_virtual_bits;
|
||||
|
||||
DeviceMemoryManagerAllocator() : main_allocator(first_address) {}
|
||||
|
||||
Common::FlatAllocator<DAddr, 0, device_virtual_bits> main_allocator;
|
||||
MultiAddressContainer multi_dev_address;
|
||||
|
||||
/// Returns true when vaddr -> vaddr+size is fully contained in the buffer
|
||||
template <bool pin_area>
|
||||
[[nodiscard]] bool IsInBounds(VAddr addr, u64 size) const noexcept {
|
||||
return addr >= 0 && addr + size <= max_device_area;
|
||||
}
|
||||
|
||||
DAddr Allocate(size_t size) {
|
||||
return main_allocator.Allocate(size);
|
||||
}
|
||||
|
||||
void AllocateFixed(DAddr b_address, size_t b_size) {
|
||||
main_allocator.AllocateFixed(b_address, b_size);
|
||||
}
|
||||
|
||||
void Free(DAddr b_address, size_t b_size) {
|
||||
main_allocator.Free(b_address, b_size);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Traits>
DeviceMemoryManager<Traits>::DeviceMemoryManager(const DeviceMemory& device_memory_)
    : physical_base{reinterpret_cast<const uintptr_t>(device_memory_.buffer.BackingBasePointer())},
      device_inter{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS),
      compressed_device_addr(1ULL << ((Settings::values.memory_layout_mode.GetValue() ==
                                               Settings::MemoryLayout::Memory_4Gb
                                           ? physical_min_bits
                                           : physical_max_bits) -
                                      Memory::YUZU_PAGEBITS)),
      continuity_tracker(device_as_size >> Memory::YUZU_PAGEBITS),
      cpu_backing_address(device_as_size >> Memory::YUZU_PAGEBITS) {
    impl = std::make_unique<DeviceMemoryManagerAllocator<Traits>>();
    cached_pages = std::make_unique<CachedPages>();

    const size_t total_virtual = device_as_size >> Memory::YUZU_PAGEBITS;
    for (size_t i = 0; i < total_virtual; i++) {
        compressed_physical_ptr[i] = 0;
        continuity_tracker[i] = 1;
        cpu_backing_address[i] = 0;
    }
    const size_t total_phys = 1ULL << ((Settings::values.memory_layout_mode.GetValue() ==
                                                Settings::MemoryLayout::Memory_4Gb
                                            ? physical_min_bits
                                            : physical_max_bits) -
                                       Memory::YUZU_PAGEBITS);
    for (size_t i = 0; i < total_phys; i++) {
        compressed_device_addr[i] = 0;
    }
}

template <typename Traits>
DeviceMemoryManager<Traits>::~DeviceMemoryManager() = default;

template <typename Traits>
void DeviceMemoryManager<Traits>::BindInterface(DeviceInterface* device_inter_) {
    device_inter = device_inter_;
}

template <typename Traits>
DAddr DeviceMemoryManager<Traits>::Allocate(size_t size) {
    return impl->Allocate(size);
}

template <typename Traits>
void DeviceMemoryManager<Traits>::AllocateFixed(DAddr start, size_t size) {
    return impl->AllocateFixed(start, size);
}

template <typename Traits>
void DeviceMemoryManager<Traits>::Free(DAddr start, size_t size) {
    impl->Free(start, size);
}

template <typename Traits>
void DeviceMemoryManager<Traits>::Map(DAddr address, VAddr virtual_address, size_t size,
                                      size_t process_id, bool track) {
    Core::Memory::Memory* process_memory = registered_processes[process_id];
    size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
    size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
    std::scoped_lock lk(mapping_guard);
    for (size_t i = 0; i < num_pages; i++) {
        const VAddr new_vaddress = virtual_address + i * Memory::YUZU_PAGESIZE;
        auto* ptr = process_memory->GetPointerSilent(Common::ProcessAddress(new_vaddress));
        if (ptr == nullptr) [[unlikely]] {
            compressed_physical_ptr[start_page_d + i] = 0;
            continue;
        }
        auto phys_addr = static_cast<u32>(GetRawPhysicalAddr(ptr) >> Memory::YUZU_PAGEBITS) + 1U;
        compressed_physical_ptr[start_page_d + i] = phys_addr;
        InsertCPUBacking(start_page_d + i, new_vaddress, process_id);
        const u32 base_dev = compressed_device_addr[phys_addr - 1U];
        const u32 new_dev = static_cast<u32>(start_page_d + i);
        if (base_dev == 0) [[likely]] {
            compressed_device_addr[phys_addr - 1U] = new_dev;
            continue;
        }
        u32 start_id = base_dev & MULTI_MASK;
        if ((base_dev >> MULTI_FLAG_BITS) == 0) {
            start_id = impl->multi_dev_address.Register(base_dev);
            compressed_device_addr[phys_addr - 1U] = MULTI_FLAG | start_id;
        }
        impl->multi_dev_address.Register(new_dev, start_id);
    }
    if (track) {
        TrackContinuityImpl(address, virtual_address, size, process_id);
    }
}

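// --- Illustrative sketch (editor's addition, not part of the merged diff) ---
// Map() also maintains the reverse physical -> device table: a slot holds either a single device
// page number or, once the physical page is aliased by several device addresses, a flagged index
// into the multi-address container. MULTI_FLAG_BITS/MULTI_FLAG/MULTI_MASK are defined earlier in
// this file and are not visible in this hunk; the constants below are assumptions chosen only to
// demonstrate the encoding style.
#include <cstdint>

namespace encoding_sketch {
constexpr std::uint32_t kFlagBits = 31;          // assumed bit position of the "multi" flag
constexpr std::uint32_t kFlag = 1u << kFlagBits; // set -> payload is a container index
constexpr std::uint32_t kMask = kFlag - 1;       // low bits carry the payload

constexpr bool IsMulti(std::uint32_t packed) {
    return (packed >> kFlagBits) != 0;
}
constexpr std::uint32_t PackIndex(std::uint32_t container_index) {
    return kFlag | (container_index & kMask);
}
constexpr std::uint32_t Payload(std::uint32_t packed) {
    return packed & kMask;
}

static_assert(IsMulti(PackIndex(5)) && Payload(PackIndex(5)) == 5);
} // namespace encoding_sketch
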
template <typename Traits>
void DeviceMemoryManager<Traits>::Unmap(DAddr address, size_t size) {
    size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
    size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
    device_inter->InvalidateRegion(address, size);
    std::scoped_lock lk(mapping_guard);
    for (size_t i = 0; i < num_pages; i++) {
        auto phys_addr = compressed_physical_ptr[start_page_d + i];
        compressed_physical_ptr[start_page_d + i] = 0;
        cpu_backing_address[start_page_d + i] = 0;
        if (phys_addr != 0) [[likely]] {
            const u32 base_dev = compressed_device_addr[phys_addr - 1U];
            if ((base_dev >> MULTI_FLAG_BITS) == 0) [[likely]] {
                compressed_device_addr[phys_addr - 1] = 0;
                continue;
            }
            const auto [more_entries, new_start] = impl->multi_dev_address.Unregister(
                static_cast<u32>(start_page_d + i), base_dev & MULTI_MASK);
            if (!more_entries) {
                compressed_device_addr[phys_addr - 1] =
                    impl->multi_dev_address.ReleaseEntry(new_start);
                continue;
            }
            compressed_device_addr[phys_addr - 1] = new_start | MULTI_FLAG;
        }
    }
}
template <typename Traits>
void DeviceMemoryManager<Traits>::TrackContinuityImpl(DAddr address, VAddr virtual_address,
                                                      size_t size, size_t process_id) {
    Core::Memory::Memory* process_memory = registered_processes[process_id];
    size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
    size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
    uintptr_t last_ptr = 0;
    size_t page_count = 1;
    for (size_t i = num_pages; i > 0; i--) {
        size_t index = i - 1;
        const VAddr new_vaddress = virtual_address + index * Memory::YUZU_PAGESIZE;
        const uintptr_t new_ptr = reinterpret_cast<uintptr_t>(
            process_memory->GetPointerSilent(Common::ProcessAddress(new_vaddress)));
        if (new_ptr + page_size == last_ptr) {
            page_count++;
        } else {
            page_count = 1;
        }
        last_ptr = new_ptr;
        continuity_tracker[start_page_d + index] = static_cast<u32>(page_count);
    }
}
template <typename Traits>
u8* DeviceMemoryManager<Traits>::GetSpan(const DAddr src_addr, const std::size_t size) {
    size_t page_index = src_addr >> page_bits;
    size_t subbits = src_addr & page_mask;
    if ((static_cast<size_t>(continuity_tracker[page_index]) << page_bits) >= size + subbits) {
        return GetPointer<u8>(src_addr);
    }
    return nullptr;
}

template <typename Traits>
const u8* DeviceMemoryManager<Traits>::GetSpan(const DAddr src_addr, const std::size_t size) const {
    size_t page_index = src_addr >> page_bits;
    size_t subbits = src_addr & page_mask;
    if ((static_cast<size_t>(continuity_tracker[page_index]) << page_bits) >= size + subbits) {
        return GetPointer<u8>(src_addr);
    }
    return nullptr;
}

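// --- Illustrative sketch (editor's addition, not part of the merged diff) ---
// continuity_tracker[i] records how many device pages starting at page i map to physically
// contiguous host memory, which is what lets GetSpan() above hand out a raw pointer only when the
// whole request fits in a single run. The same check in isolation, with an assumed 4 KiB page:
#include <cstddef>
#include <cstdint>
#include <vector>

namespace span_sketch {
constexpr std::size_t kPageBits = 12; // assumed page size of 4 KiB
constexpr std::size_t kPageMask = (std::size_t{1} << kPageBits) - 1;

// Returns true when [addr, addr + size) lies inside one physically contiguous run.
inline bool FitsInOneRun(const std::vector<std::uint32_t>& run_lengths, std::uint64_t addr,
                         std::size_t size) {
    const std::size_t page = static_cast<std::size_t>(addr >> kPageBits);
    const std::size_t offset = static_cast<std::size_t>(addr & kPageMask);
    return (static_cast<std::size_t>(run_lengths[page]) << kPageBits) >= size + offset;
}
} // namespace span_sketch
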
template <typename Traits>
void DeviceMemoryManager<Traits>::InnerGatherDeviceAddresses(Common::ScratchBuffer<u32>& buffer,
                                                             PAddr address) {
    size_t phys_addr = address >> page_bits;
    std::scoped_lock lk(mapping_guard);
    u32 backing = compressed_device_addr[phys_addr];
    if ((backing >> MULTI_FLAG_BITS) != 0) {
        impl->multi_dev_address.GatherValues(backing & MULTI_MASK, buffer);
        return;
    }
    buffer.resize(1);
    buffer[0] = backing;
}

template <typename Traits>
template <typename T>
T* DeviceMemoryManager<Traits>::GetPointer(DAddr address) {
    const size_t index = address >> Memory::YUZU_PAGEBITS;
    const size_t offset = address & Memory::YUZU_PAGEMASK;
    auto phys_addr = compressed_physical_ptr[index];
    if (phys_addr == 0) [[unlikely]] {
        return nullptr;
    }
    return GetPointerFromRaw<T>((static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) +
                                offset);
}

template <typename Traits>
template <typename T>
const T* DeviceMemoryManager<Traits>::GetPointer(DAddr address) const {
    const size_t index = address >> Memory::YUZU_PAGEBITS;
    const size_t offset = address & Memory::YUZU_PAGEMASK;
    auto phys_addr = compressed_physical_ptr[index];
    if (phys_addr == 0) [[unlikely]] {
        return nullptr;
    }
    return GetPointerFromRaw<T>((static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) +
                                offset);
}

template <typename Traits>
template <typename T>
void DeviceMemoryManager<Traits>::Write(DAddr address, T value) {
    T* ptr = GetPointer<T>(address);
    if (!ptr) [[unlikely]] {
        return;
    }
    std::memcpy(ptr, &value, sizeof(T));
}

template <typename Traits>
template <typename T>
T DeviceMemoryManager<Traits>::Read(DAddr address) const {
    const T* ptr = GetPointer<T>(address);
    T result{};
    if (!ptr) [[unlikely]] {
        return result;
    }
    std::memcpy(&result, ptr, sizeof(T));
    return result;
}

template <typename Traits>
void DeviceMemoryManager<Traits>::WalkBlock(DAddr addr, std::size_t size, auto on_unmapped,
                                            auto on_memory, auto increment) {
    std::size_t remaining_size = size;
    std::size_t page_index = addr >> Memory::YUZU_PAGEBITS;
    std::size_t page_offset = addr & Memory::YUZU_PAGEMASK;

    while (remaining_size) {
        const size_t next_pages = static_cast<std::size_t>(continuity_tracker[page_index]);
        const std::size_t copy_amount =
            std::min((next_pages << Memory::YUZU_PAGEBITS) - page_offset, remaining_size);
        const auto current_vaddr =
            static_cast<u64>((page_index << Memory::YUZU_PAGEBITS) + page_offset);
        SCOPE_EXIT({
            page_index += next_pages;
            page_offset = 0;
            increment(copy_amount);
            remaining_size -= copy_amount;
        });

        auto phys_addr = compressed_physical_ptr[page_index];
        if (phys_addr == 0) {
            on_unmapped(copy_amount, current_vaddr);
            continue;
        }
        auto* mem_ptr = GetPointerFromRaw<u8>(
            (static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) + page_offset);
        on_memory(copy_amount, mem_ptr);
    }
}

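// --- Illustrative sketch (editor's addition, not part of the merged diff) ---
// WalkBlock() carves a device-address range into physically contiguous chunks and dispatches each
// one to a callback triple (unmapped, memory, increment); the ReadBlock/WriteBlock variants below
// are all assembled from it. The hypothetical helper here reuses the same shape for a different
// task: summing the mapped bytes of a range while skipping unmapped holes. 'walk' stands in for
// any callable with WalkBlock's signature.
#include <cstddef>
#include <cstdint>

template <typename WalkFn>
std::uint64_t SumMappedBytes(WalkFn&& walk, std::uint64_t address, std::size_t size) {
    std::uint64_t sum = 0;
    walk(
        address, size,
        [&](std::size_t, std::uint64_t) {
            // Unmapped chunk: contributes nothing.
        },
        [&](std::size_t copy_amount, const std::uint8_t* ptr) {
            for (std::size_t i = 0; i < copy_amount; ++i) {
                sum += ptr[i];
            }
        },
        [&](std::size_t) {
            // No external cursor to advance; the walker tracks progress itself.
        });
    return sum;
}
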
template <typename Traits>
void DeviceMemoryManager<Traits>::ReadBlock(DAddr address, void* dest_pointer, size_t size) {
    device_inter->FlushRegion(address, size);
    WalkBlock(
        address, size,
        [&](size_t copy_amount, DAddr current_vaddr) {
            LOG_ERROR(
                HW_Memory,
                "Unmapped Device ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                current_vaddr, address, size);
            std::memset(dest_pointer, 0, copy_amount);
        },
        [&](size_t copy_amount, const u8* const src_ptr) {
            std::memcpy(dest_pointer, src_ptr, copy_amount);
        },
        [&](const std::size_t copy_amount) {
            dest_pointer = static_cast<u8*>(dest_pointer) + copy_amount;
        });
}

template <typename Traits>
void DeviceMemoryManager<Traits>::WriteBlock(DAddr address, const void* src_pointer, size_t size) {
    WalkBlock(
        address, size,
        [&](size_t copy_amount, DAddr current_vaddr) {
            LOG_ERROR(
                HW_Memory,
                "Unmapped Device WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                current_vaddr, address, size);
        },
        [&](size_t copy_amount, u8* const dst_ptr) {
            std::memcpy(dst_ptr, src_pointer, copy_amount);
        },
        [&](const std::size_t copy_amount) {
            src_pointer = static_cast<const u8*>(src_pointer) + copy_amount;
        });
    device_inter->InvalidateRegion(address, size);
}

template <typename Traits>
void DeviceMemoryManager<Traits>::ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size) {
    WalkBlock(
        address, size,
        [&](size_t copy_amount, DAddr current_vaddr) {
            LOG_ERROR(
                HW_Memory,
                "Unmapped Device ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                current_vaddr, address, size);
            std::memset(dest_pointer, 0, copy_amount);
        },
        [&](size_t copy_amount, const u8* const src_ptr) {
            std::memcpy(dest_pointer, src_ptr, copy_amount);
        },
        [&](const std::size_t copy_amount) {
            dest_pointer = static_cast<u8*>(dest_pointer) + copy_amount;
        });
}

template <typename Traits>
void DeviceMemoryManager<Traits>::WriteBlockUnsafe(DAddr address, const void* src_pointer,
                                                   size_t size) {
    WalkBlock(
        address, size,
        [&](size_t copy_amount, DAddr current_vaddr) {
            LOG_ERROR(
                HW_Memory,
                "Unmapped Device WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                current_vaddr, address, size);
        },
        [&](size_t copy_amount, u8* const dst_ptr) {
            std::memcpy(dst_ptr, src_pointer, copy_amount);
        },
        [&](const std::size_t copy_amount) {
            src_pointer = static_cast<const u8*>(src_pointer) + copy_amount;
        });
}

template <typename Traits>
size_t DeviceMemoryManager<Traits>::RegisterProcess(Memory::Memory* memory_device_inter) {
    size_t new_id;
    if (!id_pool.empty()) {
        new_id = id_pool.front();
        id_pool.pop_front();
        registered_processes[new_id] = memory_device_inter;
    } else {
        registered_processes.emplace_back(memory_device_inter);
        new_id = registered_processes.size() - 1U;
    }
    return new_id;
}

template <typename Traits>
void DeviceMemoryManager<Traits>::UnregisterProcess(size_t id) {
    registered_processes[id] = nullptr;
    id_pool.push_front(id);
}

template <typename Traits>
void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta) {
    bool locked = false;
    auto lock = [&] {
        if (!locked) {
            counter_guard.lock();
            locked = true;
        }
    };
    SCOPE_EXIT({
        if (locked) {
            counter_guard.unlock();
        }
    });
    u64 uncache_begin = 0;
    u64 cache_begin = 0;
    u64 uncache_bytes = 0;
    u64 cache_bytes = 0;
    const auto MarkRegionCaching = &DeviceMemoryManager<Traits>::DeviceMethods::MarkRegionCaching;

    std::atomic_thread_fence(std::memory_order_acquire);
    const size_t page_end = Common::DivCeil(addr + size, Memory::YUZU_PAGESIZE);
    size_t page = addr >> Memory::YUZU_PAGEBITS;
    auto [process_id, base_vaddress] = ExtractCPUBacking(page);
    size_t vpage = base_vaddress >> Memory::YUZU_PAGEBITS;
    auto* memory_device_inter = registered_processes[process_id];
    for (; page != page_end; ++page) {
        std::atomic_uint8_t& count = cached_pages->at(page >> 3).Count(page);

        if (delta > 0) {
            ASSERT_MSG(count.load(std::memory_order::relaxed) < std::numeric_limits<u8>::max(),
                       "Count may overflow!");
        } else if (delta < 0) {
            ASSERT_MSG(count.load(std::memory_order::relaxed) > 0, "Count may underflow!");
        } else {
            ASSERT_MSG(false, "Delta must be non-zero!");
        }

        // Adds or subtracts 1, as count is an unsigned 8-bit value
        count.fetch_add(static_cast<u8>(delta), std::memory_order_release);

        // Assume delta is either -1 or 1
        if (count.load(std::memory_order::relaxed) == 0) {
            if (uncache_bytes == 0) {
                uncache_begin = vpage;
            }
            uncache_bytes += Memory::YUZU_PAGESIZE;
        } else if (uncache_bytes > 0) {
            lock();
            MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS,
                              uncache_bytes, false);
            uncache_bytes = 0;
        }
        if (count.load(std::memory_order::relaxed) == 1 && delta > 0) {
            if (cache_bytes == 0) {
                cache_begin = vpage;
            }
            cache_bytes += Memory::YUZU_PAGESIZE;
        } else if (cache_bytes > 0) {
            lock();
            MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS,
                              cache_bytes, true);
            cache_bytes = 0;
        }
        vpage++;
    }
    if (uncache_bytes > 0) {
        lock();
        MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS,
                          uncache_bytes, false);
    }
    if (cache_bytes > 0) {
        lock();
        MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
                          true);
    }
}

} // namespace Core
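// --- Illustrative sketch (editor's addition, not part of the merged diff) ---
// UpdatePagesCachedCount() above keeps one u8 reference count per page and only reports pages
// whose count crosses the 0 <-> 1 boundary, batching neighbouring transitions into single ranged
// MarkRegionCaching calls. A simplified, single-threaded rendition of that coalescing loop (page
// size and callback are assumptions made for illustration):
#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

namespace caching_sketch {
constexpr std::size_t kPageSize = 4096; // assumed

// 'counts' holds one entry per page; 'delta' is +1 or -1 and applies to
// [first_page, first_page + pages). 'mark(begin_byte, size_bytes, cached)' fires once per
// coalesced run whose caching state changed.
inline void UpdateCachedRange(std::vector<std::uint8_t>& counts, std::size_t first_page,
                              std::size_t pages, int delta,
                              const std::function<void(std::size_t, std::size_t, bool)>& mark) {
    std::size_t run_begin = 0;
    std::size_t run_bytes = 0;
    bool run_cached = false;
    for (std::size_t page = first_page; page != first_page + pages; ++page) {
        counts[page] = static_cast<std::uint8_t>(counts[page] + delta);
        const bool crossed = (delta > 0 && counts[page] == 1) || (delta < 0 && counts[page] == 0);
        if (crossed) {
            if (run_bytes == 0) {
                run_begin = page * kPageSize;
                run_cached = delta > 0;
            }
            run_bytes += kPageSize;
        } else if (run_bytes != 0) {
            mark(run_begin, run_bytes, run_cached); // flush the pending run
            run_bytes = 0;
        }
    }
    if (run_bytes != 0) {
        mark(run_begin, run_bytes, run_cached);
    }
}
} // namespace caching_sketch
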
@ -97,8 +97,9 @@ std::string SaveDataAttribute::DebugInfo() const {
|
||||
static_cast<u8>(rank), index);
|
||||
}
|
||||
|
||||
SaveDataFactory::SaveDataFactory(Core::System& system_, VirtualDir save_directory_)
|
||||
: dir{std::move(save_directory_)}, system{system_} {
|
||||
SaveDataFactory::SaveDataFactory(Core::System& system_, ProgramId program_id_,
|
||||
VirtualDir save_directory_)
|
||||
: system{system_}, program_id{program_id_}, dir{std::move(save_directory_)} {
|
||||
// Delete all temporary storages
|
||||
// On hardware, it is expected that temporary storage be empty at first use.
|
||||
dir->DeleteSubdirectoryRecursive("temp");
|
||||
@ -110,7 +111,7 @@ VirtualDir SaveDataFactory::Create(SaveDataSpaceId space, const SaveDataAttribut
|
||||
PrintSaveDataAttributeWarnings(meta);
|
||||
|
||||
const auto save_directory =
|
||||
GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
|
||||
GetFullPath(program_id, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
|
||||
|
||||
return dir->CreateDirectoryRelative(save_directory);
|
||||
}
|
||||
@ -118,7 +119,7 @@ VirtualDir SaveDataFactory::Create(SaveDataSpaceId space, const SaveDataAttribut
|
||||
VirtualDir SaveDataFactory::Open(SaveDataSpaceId space, const SaveDataAttribute& meta) const {
|
||||
|
||||
const auto save_directory =
|
||||
GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
|
||||
GetFullPath(program_id, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
|
||||
|
||||
auto out = dir->GetDirectoryRelative(save_directory);
|
||||
|
||||
@ -147,14 +148,14 @@ std::string SaveDataFactory::GetSaveDataSpaceIdPath(SaveDataSpaceId space) {
|
||||
}
|
||||
}
|
||||
|
||||
std::string SaveDataFactory::GetFullPath(Core::System& system, VirtualDir dir,
|
||||
std::string SaveDataFactory::GetFullPath(ProgramId program_id, VirtualDir dir,
|
||||
SaveDataSpaceId space, SaveDataType type, u64 title_id,
|
||||
u128 user_id, u64 save_id) {
|
||||
// According to switchbrew, if a save is of type SaveData and the title id field is 0, it should
|
||||
// be interpreted as the title id of the current process.
|
||||
if (type == SaveDataType::SaveData || type == SaveDataType::DeviceSaveData) {
|
||||
if (title_id == 0) {
|
||||
title_id = system.GetApplicationProcessProgramID();
|
||||
title_id = program_id;
|
||||
}
|
||||
}
|
||||
|
||||
@ -201,7 +202,7 @@ std::string SaveDataFactory::GetUserGameSaveDataRoot(u128 user_id, bool future)
|
||||
SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,
|
||||
u128 user_id) const {
|
||||
const auto path =
|
||||
GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
|
||||
GetFullPath(program_id, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
|
||||
const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);
|
||||
|
||||
const auto size_file = relative_dir->GetFile(GetSaveDataSizeFileName());
|
||||
@ -220,7 +221,7 @@ SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,
|
||||
void SaveDataFactory::WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,
|
||||
SaveDataSize new_value) const {
|
||||
const auto path =
|
||||
GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
|
||||
GetFullPath(program_id, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
|
||||
const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);
|
||||
|
||||
const auto size_file = relative_dir->CreateFile(GetSaveDataSizeFileName());
|
||||
|
@ -87,10 +87,13 @@ constexpr const char* GetSaveDataSizeFileName() {
|
||||
return ".yuzu_save_size";
|
||||
}
|
||||
|
||||
using ProgramId = u64;
|
||||
|
||||
/// File system interface to the SaveData archive
|
||||
class SaveDataFactory {
|
||||
public:
|
||||
explicit SaveDataFactory(Core::System& system_, VirtualDir save_directory_);
|
||||
explicit SaveDataFactory(Core::System& system_, ProgramId program_id_,
|
||||
VirtualDir save_directory_);
|
||||
~SaveDataFactory();
|
||||
|
||||
VirtualDir Create(SaveDataSpaceId space, const SaveDataAttribute& meta) const;
|
||||
@ -99,7 +102,7 @@ public:
|
||||
VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const;
|
||||
|
||||
static std::string GetSaveDataSpaceIdPath(SaveDataSpaceId space);
|
||||
static std::string GetFullPath(Core::System& system, VirtualDir dir, SaveDataSpaceId space,
|
||||
static std::string GetFullPath(ProgramId program_id, VirtualDir dir, SaveDataSpaceId space,
|
||||
SaveDataType type, u64 title_id, u128 user_id, u64 save_id);
|
||||
static std::string GetUserGameSaveDataRoot(u128 user_id, bool future);
|
||||
|
||||
@ -110,8 +113,9 @@ public:
|
||||
void SetAutoCreate(bool state);
|
||||
|
||||
private:
|
||||
VirtualDir dir;
|
||||
Core::System& system;
|
||||
ProgramId program_id;
|
||||
VirtualDir dir;
|
||||
bool auto_create{true};
|
||||
};
|
||||
|
||||
|
@ -10,7 +10,7 @@
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "core/memory.h"
|
||||
#include "core/device_memory_manager.h"
|
||||
|
||||
namespace Core {
|
||||
|
||||
@ -23,7 +23,7 @@ public:
|
||||
|
||||
~GPUDirtyMemoryManager() = default;
|
||||
|
||||
void Collect(VAddr address, size_t size) {
|
||||
void Collect(PAddr address, size_t size) {
|
||||
TransformAddress t = BuildTransform(address, size);
|
||||
TransformAddress tmp, original;
|
||||
do {
|
||||
@ -47,7 +47,7 @@ public:
|
||||
std::memory_order_relaxed));
|
||||
}
|
||||
|
||||
void Gather(std::function<void(VAddr, size_t)>& callback) {
|
||||
void Gather(std::function<void(PAddr, size_t)>& callback) {
|
||||
{
|
||||
std::scoped_lock lk(guard);
|
||||
TransformAddress t = current.exchange(default_transform, std::memory_order_relaxed);
|
||||
@ -65,7 +65,7 @@ public:
|
||||
mask = mask >> empty_bits;
|
||||
|
||||
const size_t continuous_bits = std::countr_one(mask);
|
||||
callback((static_cast<VAddr>(transform.address) << page_bits) + offset,
|
||||
callback((static_cast<PAddr>(transform.address) << page_bits) + offset,
|
||||
continuous_bits << align_bits);
|
||||
mask = continuous_bits < align_size ? (mask >> continuous_bits) : 0;
|
||||
offset += continuous_bits << align_bits;
|
||||
@ -80,7 +80,7 @@ private:
|
||||
u32 mask;
|
||||
};
|
||||
|
||||
constexpr static size_t page_bits = Memory::YUZU_PAGEBITS - 1;
|
||||
constexpr static size_t page_bits = DEVICE_PAGEBITS - 1;
|
||||
constexpr static size_t page_size = 1ULL << page_bits;
|
||||
constexpr static size_t page_mask = page_size - 1;
|
||||
|
||||
@ -89,7 +89,7 @@ private:
|
||||
constexpr static size_t align_mask = align_size - 1;
|
||||
constexpr static TransformAddress default_transform = {.address = ~0U, .mask = 0U};
|
||||
|
||||
bool IsValid(VAddr address) {
|
||||
bool IsValid(PAddr address) {
|
||||
return address < (1ULL << 39);
|
||||
}
|
||||
|
||||
@ -103,7 +103,7 @@ private:
|
||||
return mask;
|
||||
}
|
||||
|
||||
TransformAddress BuildTransform(VAddr address, size_t size) {
|
||||
TransformAddress BuildTransform(PAddr address, size_t size) {
|
||||
const size_t minor_address = address & page_mask;
|
||||
const size_t minor_bit = minor_address >> align_bits;
|
||||
const size_t top_bit = (minor_address + size + align_mask) >> align_bits;
|
||||
|
214
src/core/guest_memory.h
Normal file
214
src/core/guest_memory.h
Normal file
@ -0,0 +1,214 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <iterator>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <span>
|
||||
#include <vector>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/scratch_buffer.h"
|
||||
|
||||
namespace Core::Memory {
|
||||
|
||||
enum GuestMemoryFlags : u32 {
|
||||
Read = 1 << 0,
|
||||
Write = 1 << 1,
|
||||
Safe = 1 << 2,
|
||||
Cached = 1 << 3,
|
||||
|
||||
SafeRead = Read | Safe,
|
||||
SafeWrite = Write | Safe,
|
||||
SafeReadWrite = SafeRead | SafeWrite,
|
||||
SafeReadCachedWrite = SafeReadWrite | Cached,
|
||||
|
||||
UnsafeRead = Read,
|
||||
UnsafeWrite = Write,
|
||||
UnsafeReadWrite = UnsafeRead | UnsafeWrite,
|
||||
UnsafeReadCachedWrite = UnsafeReadWrite | Cached,
|
||||
};
|
||||
|
||||
namespace {
|
||||
template <typename M, typename T, GuestMemoryFlags FLAGS>
|
||||
class GuestMemory {
|
||||
using iterator = T*;
|
||||
using const_iterator = const T*;
|
||||
using value_type = T;
|
||||
using element_type = T;
|
||||
using iterator_category = std::contiguous_iterator_tag;
|
||||
|
||||
public:
|
||||
GuestMemory() = delete;
|
||||
explicit GuestMemory(M& memory, u64 addr, std::size_t size,
|
||||
Common::ScratchBuffer<T>* backup = nullptr)
|
||||
: m_memory{memory}, m_addr{addr}, m_size{size} {
|
||||
static_assert(FLAGS & GuestMemoryFlags::Read || FLAGS & GuestMemoryFlags::Write);
|
||||
if constexpr (FLAGS & GuestMemoryFlags::Read) {
|
||||
Read(addr, size, backup);
|
||||
}
|
||||
}
|
||||
|
||||
~GuestMemory() = default;
|
||||
|
||||
T* data() noexcept {
|
||||
return m_data_span.data();
|
||||
}
|
||||
|
||||
const T* data() const noexcept {
|
||||
return m_data_span.data();
|
||||
}
|
||||
|
||||
size_t size() const noexcept {
|
||||
return m_size;
|
||||
}
|
||||
|
||||
size_t size_bytes() const noexcept {
|
||||
return this->size() * sizeof(T);
|
||||
}
|
||||
|
||||
[[nodiscard]] T* begin() noexcept {
|
||||
return this->data();
|
||||
}
|
||||
|
||||
[[nodiscard]] const T* begin() const noexcept {
|
||||
return this->data();
|
||||
}
|
||||
|
||||
[[nodiscard]] T* end() noexcept {
|
||||
return this->data() + this->size();
|
||||
}
|
||||
|
||||
[[nodiscard]] const T* end() const noexcept {
|
||||
return this->data() + this->size();
|
||||
}
|
||||
|
||||
T& operator[](size_t index) noexcept {
|
||||
return m_data_span[index];
|
||||
}
|
||||
|
||||
const T& operator[](size_t index) const noexcept {
|
||||
return m_data_span[index];
|
||||
}
|
||||
|
||||
void SetAddressAndSize(u64 addr, std::size_t size) noexcept {
|
||||
m_addr = addr;
|
||||
m_size = size;
|
||||
m_addr_changed = true;
|
||||
}
|
||||
|
||||
std::span<T> Read(u64 addr, std::size_t size,
|
||||
Common::ScratchBuffer<T>* backup = nullptr) noexcept {
|
||||
m_addr = addr;
|
||||
m_size = size;
|
||||
if (m_size == 0) {
|
||||
m_is_data_copy = true;
|
||||
return {};
|
||||
}
|
||||
|
||||
if (this->TrySetSpan()) {
|
||||
if constexpr (FLAGS & GuestMemoryFlags::Safe) {
|
||||
m_memory.FlushRegion(m_addr, this->size_bytes());
|
||||
}
|
||||
} else {
|
||||
if (backup) {
|
||||
backup->resize_destructive(this->size());
|
||||
m_data_span = *backup;
|
||||
} else {
|
||||
m_data_copy.resize(this->size());
|
||||
m_data_span = std::span(m_data_copy);
|
||||
}
|
||||
m_is_data_copy = true;
|
||||
m_span_valid = true;
|
||||
if constexpr (FLAGS & GuestMemoryFlags::Safe) {
|
||||
m_memory.ReadBlock(m_addr, this->data(), this->size_bytes());
|
||||
} else {
|
||||
m_memory.ReadBlockUnsafe(m_addr, this->data(), this->size_bytes());
|
||||
}
|
||||
}
|
||||
return m_data_span;
|
||||
}
|
||||
|
||||
void Write(std::span<T> write_data) noexcept {
|
||||
if constexpr (FLAGS & GuestMemoryFlags::Cached) {
|
||||
m_memory.WriteBlockCached(m_addr, write_data.data(), this->size_bytes());
|
||||
} else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
|
||||
m_memory.WriteBlock(m_addr, write_data.data(), this->size_bytes());
|
||||
} else {
|
||||
m_memory.WriteBlockUnsafe(m_addr, write_data.data(), this->size_bytes());
|
||||
}
|
||||
}
|
||||
|
||||
bool TrySetSpan() noexcept {
|
||||
if (u8* ptr = m_memory.GetSpan(m_addr, this->size_bytes()); ptr) {
|
||||
m_data_span = {reinterpret_cast<T*>(ptr), this->size()};
|
||||
m_span_valid = true;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
protected:
|
||||
bool IsDataCopy() const noexcept {
|
||||
return m_is_data_copy;
|
||||
}
|
||||
|
||||
bool AddressChanged() const noexcept {
|
||||
return m_addr_changed;
|
||||
}
|
||||
|
||||
M& m_memory;
|
||||
u64 m_addr{};
|
||||
size_t m_size{};
|
||||
std::span<T> m_data_span{};
|
||||
std::vector<T> m_data_copy{};
|
||||
bool m_span_valid{false};
|
||||
bool m_is_data_copy{false};
|
||||
bool m_addr_changed{false};
|
||||
};
|
||||
|
||||
template <typename M, typename T, GuestMemoryFlags FLAGS>
|
||||
class GuestMemoryScoped : public GuestMemory<M, T, FLAGS> {
|
||||
public:
|
||||
GuestMemoryScoped() = delete;
|
||||
explicit GuestMemoryScoped(M& memory, u64 addr, std::size_t size,
|
||||
Common::ScratchBuffer<T>* backup = nullptr)
|
||||
: GuestMemory<M, T, FLAGS>(memory, addr, size, backup) {
|
||||
if constexpr (!(FLAGS & GuestMemoryFlags::Read)) {
|
||||
if (!this->TrySetSpan()) {
|
||||
if (backup) {
|
||||
this->m_data_span = *backup;
|
||||
this->m_span_valid = true;
|
||||
this->m_is_data_copy = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
~GuestMemoryScoped() {
|
||||
if constexpr (FLAGS & GuestMemoryFlags::Write) {
|
||||
if (this->size() == 0) [[unlikely]] {
|
||||
return;
|
||||
}
|
||||
|
||||
if (this->AddressChanged() || this->IsDataCopy()) {
|
||||
ASSERT(this->m_span_valid);
|
||||
if constexpr (FLAGS & GuestMemoryFlags::Cached) {
|
||||
this->m_memory.WriteBlockCached(this->m_addr, this->data(), this->size_bytes());
|
||||
} else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
|
||||
this->m_memory.WriteBlock(this->m_addr, this->data(), this->size_bytes());
|
||||
} else {
|
||||
this->m_memory.WriteBlockUnsafe(this->m_addr, this->data(), this->size_bytes());
|
||||
}
|
||||
} else if constexpr ((FLAGS & GuestMemoryFlags::Safe) ||
|
||||
(FLAGS & GuestMemoryFlags::Cached)) {
|
||||
this->m_memory.InvalidateRegion(this->m_addr, this->size_bytes());
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
} // namespace
|
||||
|
||||
} // namespace Core::Memory
|
@ -28,14 +28,14 @@ Result KMemoryBlockManager::Initialize(KProcessAddress st, KProcessAddress nd,
|
||||
}
|
||||
|
||||
void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
|
||||
HostUnmapCallback&& host_unmap_callback) {
|
||||
BlockCallback&& block_callback) {
|
||||
// Erase every block until we have none left.
|
||||
auto it = m_memory_block_tree.begin();
|
||||
while (it != m_memory_block_tree.end()) {
|
||||
KMemoryBlock* block = std::addressof(*it);
|
||||
it = m_memory_block_tree.erase(it);
|
||||
block_callback(block->GetAddress(), block->GetSize());
|
||||
slab_manager->Free(block);
|
||||
host_unmap_callback(block->GetAddress(), block->GetSize());
|
||||
}
|
||||
|
||||
ASSERT(m_memory_block_tree.empty());
|
||||
|
@ -85,11 +85,11 @@ public:
|
||||
public:
|
||||
KMemoryBlockManager();
|
||||
|
||||
using HostUnmapCallback = std::function<void(Common::ProcessAddress, u64)>;
|
||||
using BlockCallback = std::function<void(Common::ProcessAddress, u64)>;
|
||||
|
||||
Result Initialize(KProcessAddress st, KProcessAddress nd,
|
||||
KMemoryBlockSlabManager* slab_manager);
|
||||
void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);
|
||||
void Finalize(KMemoryBlockSlabManager* slab_manager, BlockCallback&& block_callback);
|
||||
|
||||
iterator end() {
|
||||
return m_memory_block_tree.end();
|
||||
|
@ -431,15 +431,43 @@ Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool
|
||||
m_memory_block_slab_manager));
|
||||
}
|
||||
|
||||
Result KPageTableBase::FinalizeProcess() {
|
||||
// Only process tables should be finalized.
|
||||
ASSERT(!this->IsKernel());
|
||||
|
||||
// NOTE: Here Nintendo calls an unknown OnFinalize function.
|
||||
// this->OnFinalize();
|
||||
|
||||
// NOTE: Here Nintendo calls a second unknown OnFinalize function.
|
||||
// this->OnFinalize2();
|
||||
|
||||
// NOTE: Here Nintendo does a page table walk to discover heap pages to free.
|
||||
// We will use the block manager finalization below to free them.
|
||||
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KPageTableBase::Finalize() {
|
||||
auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
|
||||
if (Settings::IsFastmemEnabled()) {
|
||||
this->FinalizeProcess();
|
||||
|
||||
auto BlockCallback = [&](KProcessAddress addr, u64 size) {
|
||||
if (m_impl->fastmem_arena) {
|
||||
m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size, false);
|
||||
}
|
||||
|
||||
// Get physical pages.
|
||||
KPageGroup pg(m_kernel, m_block_info_manager);
|
||||
this->MakePageGroup(pg, addr, size / PageSize);
|
||||
|
||||
// Free the pages.
|
||||
pg.CloseAndReset();
|
||||
};
|
||||
|
||||
// Finalize memory blocks.
|
||||
m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback));
|
||||
{
|
||||
KScopedLightLock lk(m_general_lock);
|
||||
m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(BlockCallback));
|
||||
}
|
||||
|
||||
// Free any unsafe mapped memory.
|
||||
if (m_mapped_unsafe_physical_memory) {
|
||||
|
@ -241,6 +241,7 @@ public:
|
||||
KResourceLimit* resource_limit, Core::Memory::Memory& memory,
|
||||
KProcessAddress aslr_space_start);
|
||||
|
||||
Result FinalizeProcess();
|
||||
void Finalize();
|
||||
|
||||
bool IsKernel() const {
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include "common/scope_exit.h"
|
||||
#include "common/settings.h"
|
||||
#include "core/core.h"
|
||||
#include "core/gpu_dirty_memory_manager.h"
|
||||
#include "core/hle/kernel/k_process.h"
|
||||
#include "core/hle/kernel/k_scoped_resource_reservation.h"
|
||||
#include "core/hle/kernel/k_shared_memory.h"
|
||||
@ -171,6 +172,12 @@ void KProcess::Finalize() {
|
||||
m_resource_limit->Close();
|
||||
}
|
||||
|
||||
// Clear expensive resources, as the destructor is not called for guest objects.
|
||||
for (auto& interface : m_arm_interfaces) {
|
||||
interface.reset();
|
||||
}
|
||||
m_exclusive_monitor.reset();
|
||||
|
||||
// Perform inherited finalization.
|
||||
KSynchronizationObject::Finalize();
|
||||
}
|
||||
@ -314,7 +321,7 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa
|
||||
|
||||
// Ensure our memory is initialized.
|
||||
m_memory.SetCurrentPageTable(*this);
|
||||
m_memory.SetGPUDirtyManagers(m_dirty_memory_managers);
|
||||
m_memory.SetGPUDirtyManagers(m_kernel.System().GetGPUDirtyMemoryManager());
|
||||
|
||||
// Ensure we can insert the code region.
|
||||
R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize,
|
||||
@ -411,7 +418,7 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params,
|
||||
|
||||
// Ensure our memory is initialized.
|
||||
m_memory.SetCurrentPageTable(*this);
|
||||
m_memory.SetGPUDirtyManagers(m_dirty_memory_managers);
|
||||
m_memory.SetGPUDirtyManagers(m_kernel.System().GetGPUDirtyMemoryManager());
|
||||
|
||||
// Ensure we can insert the code region.
|
||||
R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState::Code),
|
||||
@ -1135,8 +1142,7 @@ void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {}
|
||||
KProcess::KProcess(KernelCore& kernel)
|
||||
: KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel},
|
||||
m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()},
|
||||
m_handle_table{kernel}, m_dirty_memory_managers{},
|
||||
m_exclusive_monitor{}, m_memory{kernel.System()} {}
|
||||
m_handle_table{kernel}, m_exclusive_monitor{}, m_memory{kernel.System()} {}
|
||||
KProcess::~KProcess() = default;
|
||||
|
||||
Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
|
||||
@ -1233,10 +1239,10 @@ void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
|
||||
ReprotectSegment(code_set.DataSegment(), Svc::MemoryPermission::ReadWrite);
|
||||
|
||||
#ifdef HAS_NCE
|
||||
if (this->IsApplication() && Settings::IsNceEnabled()) {
|
||||
const auto& patch = code_set.PatchSegment();
|
||||
if (this->IsApplication() && Settings::IsNceEnabled() && patch.size != 0) {
|
||||
auto& buffer = m_kernel.System().DeviceMemory().buffer;
|
||||
const auto& code = code_set.CodeSegment();
|
||||
const auto& patch = code_set.PatchSegment();
|
||||
buffer.Protect(GetInteger(base_addr + code.addr), code.size,
|
||||
Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
|
||||
buffer.Protect(GetInteger(base_addr + patch.addr), patch.size,
|
||||
@ -1318,10 +1324,4 @@ bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointT
|
||||
return true;
|
||||
}
|
||||
|
||||
void KProcess::GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback) {
|
||||
for (auto& manager : m_dirty_memory_managers) {
|
||||
manager.Gather(callback);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
|
@ -7,7 +7,6 @@
|
||||
|
||||
#include "core/arm/arm_interface.h"
|
||||
#include "core/file_sys/program_metadata.h"
|
||||
#include "core/gpu_dirty_memory_manager.h"
|
||||
#include "core/hle/kernel/code_set.h"
|
||||
#include "core/hle/kernel/k_address_arbiter.h"
|
||||
#include "core/hle/kernel/k_capabilities.h"
|
||||
@ -128,7 +127,6 @@ private:
|
||||
#ifdef HAS_NCE
|
||||
std::unordered_map<u64, u64> m_post_handlers{};
|
||||
#endif
|
||||
std::array<Core::GPUDirtyMemoryManager, Core::Hardware::NUM_CPU_CORES> m_dirty_memory_managers;
|
||||
std::unique_ptr<Core::ExclusiveMonitor> m_exclusive_monitor;
|
||||
Core::Memory::Memory m_memory;
|
||||
|
||||
@ -511,8 +509,6 @@ public:
|
||||
return m_memory;
|
||||
}
|
||||
|
||||
void GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback);
|
||||
|
||||
Core::ExclusiveMonitor& GetExclusiveMonitor() const {
|
||||
return *m_exclusive_monitor;
|
||||
}
|
||||
|
@ -112,7 +112,14 @@ struct KernelCore::Impl {
|
||||
old_process->Close();
|
||||
}
|
||||
|
||||
{
|
||||
std::scoped_lock lk{process_list_lock};
|
||||
for (auto* const process : process_list) {
|
||||
process->Terminate();
|
||||
process->Close();
|
||||
}
|
||||
process_list.clear();
|
||||
}
|
||||
|
||||
next_object_id = 0;
|
||||
next_kernel_process_id = KProcess::InitialProcessIdMin;
|
||||
@ -770,6 +777,7 @@ struct KernelCore::Impl {
|
||||
std::atomic<u64> next_thread_id{1};
|
||||
|
||||
// Lists all processes that exist in the current session.
|
||||
std::mutex process_list_lock;
|
||||
std::vector<KProcess*> process_list;
|
||||
std::atomic<KProcess*> application_process{};
|
||||
std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
|
||||
@ -869,9 +877,19 @@ KResourceLimit* KernelCore::GetSystemResourceLimit() {
|
||||
}
|
||||
|
||||
void KernelCore::AppendNewProcess(KProcess* process) {
|
||||
process->Open();
|
||||
|
||||
std::scoped_lock lk{impl->process_list_lock};
|
||||
impl->process_list.push_back(process);
|
||||
}
|
||||
|
||||
void KernelCore::RemoveProcess(KProcess* process) {
|
||||
std::scoped_lock lk{impl->process_list_lock};
|
||||
if (std::erase(impl->process_list, process)) {
|
||||
process->Close();
|
||||
}
|
||||
}
|
||||
|
||||
void KernelCore::MakeApplicationProcess(KProcess* process) {
|
||||
impl->MakeApplicationProcess(process);
|
||||
}
|
||||
@ -884,8 +902,15 @@ const KProcess* KernelCore::ApplicationProcess() const {
|
||||
return impl->application_process;
|
||||
}
|
||||
|
||||
const std::vector<KProcess*>& KernelCore::GetProcessList() const {
|
||||
return impl->process_list;
|
||||
std::list<KScopedAutoObject<KProcess>> KernelCore::GetProcessList() {
|
||||
std::list<KScopedAutoObject<KProcess>> processes;
|
||||
std::scoped_lock lk{impl->process_list_lock};
|
||||
|
||||
for (auto* const process : impl->process_list) {
|
||||
processes.emplace_back(process);
|
||||
}
|
||||
|
||||
return processes;
|
||||
}
|
||||
|
||||
Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() {
|
||||
|
@ -5,6 +5,7 @@
|
||||
|
||||
#include <array>
|
||||
#include <functional>
|
||||
#include <list>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
@ -116,8 +117,9 @@ public:
|
||||
/// Retrieves a shared pointer to the system resource limit instance.
|
||||
KResourceLimit* GetSystemResourceLimit();
|
||||
|
||||
/// Adds the given shared pointer to an internal list of active processes.
|
||||
/// Adds/removes the given pointer to an internal list of active processes.
|
||||
void AppendNewProcess(KProcess* process);
|
||||
void RemoveProcess(KProcess* process);
|
||||
|
||||
/// Makes the given process the new application process.
|
||||
void MakeApplicationProcess(KProcess* process);
|
||||
@ -129,7 +131,7 @@ public:
|
||||
const KProcess* ApplicationProcess() const;
|
||||
|
||||
/// Retrieves the list of processes.
|
||||
const std::vector<KProcess*>& GetProcessList() const;
|
||||
std::list<KScopedAutoObject<KProcess>> GetProcessList();
|
||||
|
||||
/// Gets the sole instance of the global scheduler
|
||||
Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
|
||||
|
@ -74,13 +74,15 @@ Result GetProcessList(Core::System& system, s32* out_num_processes, u64 out_proc
|
||||
}
|
||||
|
||||
auto& memory = GetCurrentMemory(kernel);
|
||||
const auto& process_list = kernel.GetProcessList();
|
||||
auto process_list = kernel.GetProcessList();
|
||||
auto it = process_list.begin();
|
||||
|
||||
const auto num_processes = process_list.size();
|
||||
const auto copy_amount =
|
||||
std::min(static_cast<std::size_t>(out_process_ids_size), num_processes);
|
||||
|
||||
for (std::size_t i = 0; i < copy_amount; ++i) {
|
||||
memory.Write64(out_process_ids, process_list[i]->GetProcessId());
|
||||
for (std::size_t i = 0; i < copy_amount && it != process_list.end(); ++i, ++it) {
|
||||
memory.Write64(out_process_ids, (*it)->GetProcessId());
|
||||
out_process_ids += sizeof(u64);
|
||||
}
|
||||
|
||||
|
@ -61,9 +61,7 @@ ProfileManager::ProfileManager() {
|
||||
OpenUser(*GetUser(current));
|
||||
}
|
||||
|
||||
ProfileManager::~ProfileManager() {
|
||||
WriteUserSaveFile();
|
||||
}
|
||||
ProfileManager::~ProfileManager() = default;
|
||||
|
||||
/// After a users creation it needs to be "registered" to the system. AddToProfiles handles the
|
||||
/// internal management of the users profiles
|
||||
@ -113,6 +111,8 @@ Result ProfileManager::CreateNewUser(UUID uuid, const ProfileUsername& username)
|
||||
return ERROR_USER_ALREADY_EXISTS;
|
||||
}
|
||||
|
||||
is_save_needed = true;
|
||||
|
||||
return AddUser({
|
||||
.user_uuid = uuid,
|
||||
.username = username,
|
||||
@ -326,6 +326,9 @@ bool ProfileManager::RemoveUser(UUID uuid) {
|
||||
profiles[*index] = ProfileInfo{};
|
||||
std::stable_partition(profiles.begin(), profiles.end(),
|
||||
[](const ProfileInfo& profile) { return profile.user_uuid.IsValid(); });
|
||||
|
||||
is_save_needed = true;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -340,6 +343,8 @@ bool ProfileManager::SetProfileBase(UUID uuid, const ProfileBase& profile_new) {
|
||||
profile.username = profile_new.username;
|
||||
profile.creation_time = profile_new.timestamp;
|
||||
|
||||
is_save_needed = true;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -348,6 +353,7 @@ bool ProfileManager::SetProfileBaseAndData(Common::UUID uuid, const ProfileBase&
|
||||
const auto index = GetUserIndex(uuid);
|
||||
if (index.has_value() && SetProfileBase(uuid, profile_new)) {
|
||||
profiles[*index].data = data_new;
|
||||
is_save_needed = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -391,6 +397,10 @@ void ProfileManager::ParseUserSaveFile() {
|
||||
}
|
||||
|
||||
void ProfileManager::WriteUserSaveFile() {
|
||||
if (!is_save_needed) {
|
||||
return;
|
||||
}
|
||||
|
||||
ProfileDataRaw raw{};
|
||||
|
||||
for (std::size_t i = 0; i < MAX_USERS; ++i) {
|
||||
@ -423,7 +433,10 @@ void ProfileManager::WriteUserSaveFile() {
|
||||
if (!save.IsOpen() || !save.SetSize(sizeof(ProfileDataRaw)) || !save.WriteObject(raw)) {
|
||||
LOG_WARNING(Service_ACC, "Failed to write save data to file... No changes to user data "
|
||||
"made in current session will be saved.");
|
||||
return;
|
||||
}
|
||||
|
||||
is_save_needed = false;
|
||||
}
|
||||
|
||||
}; // namespace Service::Account
|
||||
|
@ -103,6 +103,7 @@ private:
|
||||
std::optional<std::size_t> AddToProfiles(const ProfileInfo& profile);
|
||||
bool RemoveProfileAtIndex(std::size_t index);
|
||||
|
||||
bool is_save_needed{};
|
||||
std::array<ProfileInfo, MAX_USERS> profiles{};
|
||||
std::array<ProfileInfo, MAX_USERS> stored_opened_profiles{};
|
||||
std::size_t user_count{};
|
||||
|
@ -36,6 +36,7 @@
|
||||
#include "core/hle/service/caps/caps_su.h"
|
||||
#include "core/hle/service/caps/caps_types.h"
|
||||
#include "core/hle/service/filesystem/filesystem.h"
|
||||
#include "core/hle/service/filesystem/save_data_controller.h"
|
||||
#include "core/hle/service/ipc_helpers.h"
|
||||
#include "core/hle/service/ns/ns.h"
|
||||
#include "core/hle/service/nvnflinger/fb_share_buffer_manager.h"
|
||||
@ -2178,7 +2179,7 @@ void IApplicationFunctions::EnsureSaveData(HLERequestContext& ctx) {
|
||||
attribute.type = FileSys::SaveDataType::SaveData;
|
||||
|
||||
FileSys::VirtualDir save_data{};
|
||||
const auto res = system.GetFileSystemController().CreateSaveData(
|
||||
const auto res = system.GetFileSystemController().OpenSaveDataController()->CreateSaveData(
|
||||
&save_data, FileSys::SaveDataSpaceId::NandUser, attribute);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 4};
|
||||
@ -2353,7 +2354,7 @@ void IApplicationFunctions::ExtendSaveData(HLERequestContext& ctx) {
|
||||
"new_journal={:016X}",
|
||||
static_cast<u8>(type), user_id[1], user_id[0], new_normal_size, new_journal_size);
|
||||
|
||||
system.GetFileSystemController().WriteSaveDataSize(
|
||||
system.GetFileSystemController().OpenSaveDataController()->WriteSaveDataSize(
|
||||
type, system.GetApplicationProcessProgramID(), user_id,
|
||||
{new_normal_size, new_journal_size});
|
||||
|
||||
@ -2378,7 +2379,7 @@ void IApplicationFunctions::GetSaveDataSize(HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_AM, "called with type={:02X}, user_id={:016X}{:016X}", type, user_id[1],
|
||||
user_id[0]);
|
||||
|
||||
const auto size = system.GetFileSystemController().ReadSaveDataSize(
|
||||
const auto size = system.GetFileSystemController().OpenSaveDataController()->ReadSaveDataSize(
|
||||
type, system.GetApplicationProcessProgramID(), user_id);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 6};
|
||||
|
@ -18,11 +18,11 @@ using namespace AudioCore::AudioIn;
|
||||
class IAudioIn final : public ServiceFramework<IAudioIn> {
|
||||
public:
|
||||
explicit IAudioIn(Core::System& system_, Manager& manager, size_t session_id,
|
||||
const std::string& device_name, const AudioInParameter& in_params, u32 handle,
|
||||
u64 applet_resource_user_id)
|
||||
const std::string& device_name, const AudioInParameter& in_params,
|
||||
Kernel::KProcess* handle, u64 applet_resource_user_id)
|
||||
: ServiceFramework{system_, "IAudioIn"},
|
||||
service_context{system_, "IAudioIn"}, event{service_context.CreateEvent("AudioInEvent")},
|
||||
impl{std::make_shared<In>(system_, manager, event, session_id)} {
|
||||
process{handle}, impl{std::make_shared<In>(system_, manager, event, session_id)} {
|
||||
// clang-format off
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, &IAudioIn::GetAudioInState, "GetAudioInState"},
|
||||
@ -45,6 +45,8 @@ public:
|
||||
|
||||
RegisterHandlers(functions);
|
||||
|
||||
process->Open();
|
||||
|
||||
if (impl->GetSystem()
|
||||
.Initialize(device_name, in_params, handle, applet_resource_user_id)
|
||||
.IsError()) {
|
||||
@ -55,6 +57,7 @@ public:
|
||||
~IAudioIn() override {
|
||||
impl->Free();
|
||||
service_context.CloseEvent(event);
|
||||
process->Close();
|
||||
}
|
||||
|
||||
[[nodiscard]] std::shared_ptr<In> GetImpl() {
|
||||
@ -196,6 +199,7 @@ private:
|
||||
|
||||
KernelHelpers::ServiceContext service_context;
|
||||
Kernel::KEvent* event;
|
||||
Kernel::KProcess* process;
|
||||
std::shared_ptr<AudioCore::AudioIn::In> impl;
|
||||
Common::ScratchBuffer<u64> released_buffer;
|
||||
};
|
||||
@ -267,6 +271,14 @@ void AudInU::OpenAudioIn(HLERequestContext& ctx) {
|
||||
auto device_name = Common::StringFromBuffer(device_name_data);
|
||||
auto handle{ctx.GetCopyHandle(0)};
|
||||
|
||||
auto process{ctx.GetObjectFromHandle<Kernel::KProcess>(handle)};
|
||||
if (process.IsNull()) {
|
||||
LOG_ERROR(Service_Audio, "Failed to get process handle");
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ResultUnknown);
|
||||
return;
|
||||
}
|
||||
|
||||
std::scoped_lock l{impl->mutex};
|
||||
auto link{impl->LinkToManager()};
|
||||
if (link.IsError()) {
|
||||
@ -287,8 +299,9 @@ void AudInU::OpenAudioIn(HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_Audio, "Opening new AudioIn, sessionid={}, free sessions={}", new_session_id,
|
||||
impl->num_free_sessions);
|
||||
|
||||
auto audio_in = std::make_shared<IAudioIn>(system, *impl, new_session_id, device_name,
|
||||
in_params, handle, applet_resource_user_id);
|
||||
auto audio_in =
|
||||
std::make_shared<IAudioIn>(system, *impl, new_session_id, device_name, in_params,
|
||||
process.GetPointerUnsafe(), applet_resource_user_id);
|
||||
impl->sessions[new_session_id] = audio_in->GetImpl();
|
||||
impl->applet_resource_user_ids[new_session_id] = applet_resource_user_id;
|
||||
|
||||
@ -318,6 +331,14 @@ void AudInU::OpenAudioInProtocolSpecified(HLERequestContext& ctx) {
|
||||
auto device_name = Common::StringFromBuffer(device_name_data);
|
||||
auto handle{ctx.GetCopyHandle(0)};
|
||||
|
||||
auto process{ctx.GetObjectFromHandle<Kernel::KProcess>(handle)};
|
||||
if (process.IsNull()) {
|
||||
LOG_ERROR(Service_Audio, "Failed to get process handle");
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ResultUnknown);
|
||||
return;
|
||||
}
|
||||
|
||||
std::scoped_lock l{impl->mutex};
|
||||
auto link{impl->LinkToManager()};
|
||||
if (link.IsError()) {
|
||||
@ -338,8 +359,9 @@ void AudInU::OpenAudioInProtocolSpecified(HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_Audio, "Opening new AudioIn, sessionid={}, free sessions={}", new_session_id,
|
||||
impl->num_free_sessions);
|
||||
|
||||
auto audio_in = std::make_shared<IAudioIn>(system, *impl, new_session_id, device_name,
|
||||
in_params, handle, applet_resource_user_id);
|
||||
auto audio_in =
|
||||
std::make_shared<IAudioIn>(system, *impl, new_session_id, device_name, in_params,
|
||||
process.GetPointerUnsafe(), applet_resource_user_id);
|
||||
impl->sessions[new_session_id] = audio_in->GetImpl();
|
||||
impl->applet_resource_user_ids[new_session_id] = applet_resource_user_id;
|
||||
|
||||
|
@ -26,9 +26,10 @@ class IAudioOut final : public ServiceFramework<IAudioOut> {
|
||||
public:
|
||||
explicit IAudioOut(Core::System& system_, AudioCore::AudioOut::Manager& manager,
|
||||
size_t session_id, const std::string& device_name,
|
||||
const AudioOutParameter& in_params, u32 handle, u64 applet_resource_user_id)
|
||||
const AudioOutParameter& in_params, Kernel::KProcess* handle,
|
||||
u64 applet_resource_user_id)
|
||||
: ServiceFramework{system_, "IAudioOut"}, service_context{system_, "IAudioOut"},
|
||||
event{service_context.CreateEvent("AudioOutEvent")},
|
||||
event{service_context.CreateEvent("AudioOutEvent")}, process{handle},
|
||||
impl{std::make_shared<AudioCore::AudioOut::Out>(system_, manager, event, session_id)} {
|
||||
|
||||
// clang-format off
|
||||
@ -50,11 +51,14 @@ public:
|
||||
};
|
||||
// clang-format on
|
||||
RegisterHandlers(functions);
|
||||
|
||||
process->Open();
|
||||
}
|
||||
|
||||
~IAudioOut() override {
|
||||
impl->Free();
|
||||
service_context.CloseEvent(event);
|
||||
process->Close();
|
||||
}
|
||||
|
||||
[[nodiscard]] std::shared_ptr<AudioCore::AudioOut::Out> GetImpl() {
|
||||
@ -206,6 +210,7 @@ private:
|
||||
|
||||
KernelHelpers::ServiceContext service_context;
|
||||
Kernel::KEvent* event;
|
||||
Kernel::KProcess* process;
|
||||
std::shared_ptr<AudioCore::AudioOut::Out> impl;
|
||||
Common::ScratchBuffer<u64> released_buffer;
|
||||
};
|
||||
@ -257,6 +262,14 @@ void AudOutU::OpenAudioOut(HLERequestContext& ctx) {
|
||||
auto device_name = Common::StringFromBuffer(device_name_data);
|
||||
auto handle{ctx.GetCopyHandle(0)};
|
||||
|
||||
auto process{ctx.GetObjectFromHandle<Kernel::KProcess>(handle)};
|
||||
if (process.IsNull()) {
|
||||
LOG_ERROR(Service_Audio, "Failed to get process handle");
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ResultUnknown);
|
||||
return;
|
||||
}
|
||||
|
||||
auto link{impl->LinkToManager()};
|
||||
if (link.IsError()) {
|
||||
LOG_ERROR(Service_Audio, "Failed to link Audio Out to Audio Manager");
|
||||
@ -276,10 +289,11 @@ void AudOutU::OpenAudioOut(HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_Audio, "Opening new AudioOut, sessionid={}, free sessions={}", new_session_id,
|
||||
impl->num_free_sessions);
|
||||
|
||||
auto audio_out = std::make_shared<IAudioOut>(system, *impl, new_session_id, device_name,
|
||||
in_params, handle, applet_resource_user_id);
|
||||
result = audio_out->GetImpl()->GetSystem().Initialize(device_name, in_params, handle,
|
||||
applet_resource_user_id);
|
||||
auto audio_out =
|
||||
std::make_shared<IAudioOut>(system, *impl, new_session_id, device_name, in_params,
|
||||
process.GetPointerUnsafe(), applet_resource_user_id);
|
||||
result = audio_out->GetImpl()->GetSystem().Initialize(
|
||||
device_name, in_params, process.GetPointerUnsafe(), applet_resource_user_id);
|
||||
if (result.IsError()) {
|
||||
LOG_ERROR(Service_Audio, "Failed to initialize the AudioOut System!");
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
|
@ -24,15 +24,13 @@
|
||||
#include "core/hle/service/filesystem/fsp_ldr.h"
|
||||
#include "core/hle/service/filesystem/fsp_pr.h"
|
||||
#include "core/hle/service/filesystem/fsp_srv.h"
|
||||
#include "core/hle/service/filesystem/romfs_controller.h"
|
||||
#include "core/hle/service/filesystem/save_data_controller.h"
|
||||
#include "core/hle/service/server_manager.h"
|
||||
#include "core/loader/loader.h"
|
||||
|
||||
namespace Service::FileSystem {
|
||||
|
||||
// A default size for normal/journal save data size if application control metadata cannot be found.
|
||||
// This should be large enough to satisfy even the most extreme requirements (~4.2GB)
|
||||
constexpr u64 SUFFICIENT_SAVE_DATA_SIZE = 0xF0000000;
|
||||
|
||||
static FileSys::VirtualDir GetDirectoryRelativeWrapped(FileSys::VirtualDir base,
|
||||
std::string_view dir_name_) {
|
||||
std::string dir_name(Common::FS::SanitizePath(dir_name_));
|
||||
@ -297,145 +295,65 @@ FileSystemController::FileSystemController(Core::System& system_) : system{syste
|
||||
|
||||
FileSystemController::~FileSystemController() = default;
|
||||
|
||||
Result FileSystemController::RegisterRomFS(std::unique_ptr<FileSys::RomFSFactory>&& factory) {
|
||||
romfs_factory = std::move(factory);
|
||||
LOG_DEBUG(Service_FS, "Registered RomFS");
|
||||
Result FileSystemController::RegisterProcess(
|
||||
ProcessId process_id, ProgramId program_id,
|
||||
std::shared_ptr<FileSys::RomFSFactory>&& romfs_factory) {
|
||||
std::scoped_lock lk{registration_lock};
|
||||
|
||||
registrations.emplace(process_id, Registration{
|
||||
.program_id = program_id,
|
||||
.romfs_factory = std::move(romfs_factory),
|
||||
.save_data_factory = CreateSaveDataFactory(program_id),
|
||||
});
|
||||
|
||||
LOG_DEBUG(Service_FS, "Registered for process {}", process_id);
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
Result FileSystemController::RegisterSaveData(std::unique_ptr<FileSys::SaveDataFactory>&& factory) {
|
||||
ASSERT_MSG(save_data_factory == nullptr, "Tried to register a second save data");
|
||||
save_data_factory = std::move(factory);
|
||||
LOG_DEBUG(Service_FS, "Registered save data");
|
||||
Result FileSystemController::OpenProcess(
|
||||
ProgramId* out_program_id, std::shared_ptr<SaveDataController>* out_save_data_controller,
|
||||
std::shared_ptr<RomFsController>* out_romfs_controller, ProcessId process_id) {
|
||||
std::scoped_lock lk{registration_lock};
|
||||
|
||||
const auto it = registrations.find(process_id);
|
||||
if (it == registrations.end()) {
|
||||
return FileSys::ERROR_ENTITY_NOT_FOUND;
|
||||
}
|
||||
|
||||
*out_program_id = it->second.program_id;
|
||||
*out_save_data_controller =
|
||||
std::make_shared<SaveDataController>(system, it->second.save_data_factory);
|
||||
*out_romfs_controller =
|
||||
std::make_shared<RomFsController>(it->second.romfs_factory, it->second.program_id);
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
Result FileSystemController::RegisterSDMC(std::unique_ptr<FileSys::SDMCFactory>&& factory) {
|
||||
ASSERT_MSG(sdmc_factory == nullptr, "Tried to register a second SDMC");
|
||||
sdmc_factory = std::move(factory);
|
||||
LOG_DEBUG(Service_FS, "Registered SDMC");
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
Result FileSystemController::RegisterBIS(std::unique_ptr<FileSys::BISFactory>&& factory) {
|
||||
ASSERT_MSG(bis_factory == nullptr, "Tried to register a second BIS");
|
||||
bis_factory = std::move(factory);
|
||||
LOG_DEBUG(Service_FS, "Registered BIS");
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
void FileSystemController::SetPackedUpdate(FileSys::VirtualFile update_raw) {
|
||||
void FileSystemController::SetPackedUpdate(ProcessId process_id, FileSys::VirtualFile update_raw) {
|
||||
LOG_TRACE(Service_FS, "Setting packed update for romfs");
|
||||
|
||||
if (romfs_factory == nullptr)
|
||||
std::scoped_lock lk{registration_lock};
|
||||
const auto it = registrations.find(process_id);
|
||||
if (it == registrations.end()) {
|
||||
return;
|
||||
}
|
||||
|
||||
romfs_factory->SetPackedUpdate(std::move(update_raw));
|
||||
it->second.romfs_factory->SetPackedUpdate(std::move(update_raw));
|
||||
}
|
||||
|
||||
FileSys::VirtualFile FileSystemController::OpenRomFSCurrentProcess() const {
|
||||
LOG_TRACE(Service_FS, "Opening RomFS for current process");
|
||||
|
||||
if (romfs_factory == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return romfs_factory->OpenCurrentProcess(system.GetApplicationProcessProgramID());
|
||||
std::shared_ptr<SaveDataController> FileSystemController::OpenSaveDataController() {
|
||||
return std::make_shared<SaveDataController>(system, CreateSaveDataFactory(ProgramId{}));
|
||||
}
|
||||
|
||||
FileSys::VirtualFile FileSystemController::OpenPatchedRomFS(u64 title_id,
|
||||
FileSys::ContentRecordType type) const {
|
||||
LOG_TRACE(Service_FS, "Opening patched RomFS for title_id={:016X}", title_id);
|
||||
std::shared_ptr<FileSys::SaveDataFactory> FileSystemController::CreateSaveDataFactory(
|
||||
ProgramId program_id) {
|
||||
using YuzuPath = Common::FS::YuzuPath;
|
||||
const auto rw_mode = FileSys::Mode::ReadWrite;
|
||||
|
||||
if (romfs_factory == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return romfs_factory->OpenPatchedRomFS(title_id, type);
|
||||
}
|
||||
|
||||
FileSys::VirtualFile FileSystemController::OpenPatchedRomFSWithProgramIndex(
|
||||
u64 title_id, u8 program_index, FileSys::ContentRecordType type) const {
|
||||
LOG_TRACE(Service_FS, "Opening patched RomFS for title_id={:016X}, program_index={}", title_id,
|
||||
program_index);
|
||||
|
||||
if (romfs_factory == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return romfs_factory->OpenPatchedRomFSWithProgramIndex(title_id, program_index, type);
|
||||
}
|
||||
|
||||
FileSys::VirtualFile FileSystemController::OpenRomFS(u64 title_id, FileSys::StorageId storage_id,
|
||||
FileSys::ContentRecordType type) const {
|
||||
LOG_TRACE(Service_FS, "Opening RomFS for title_id={:016X}, storage_id={:02X}, type={:02X}",
|
||||
title_id, storage_id, type);
|
||||
|
||||
if (romfs_factory == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return romfs_factory->Open(title_id, storage_id, type);
|
||||
}
|
||||
|
||||
std::shared_ptr<FileSys::NCA> FileSystemController::OpenBaseNca(
|
||||
u64 title_id, FileSys::StorageId storage_id, FileSys::ContentRecordType type) const {
|
||||
return romfs_factory->GetEntry(title_id, storage_id, type);
|
||||
}
|
||||
|
||||
Result FileSystemController::CreateSaveData(FileSys::VirtualDir* out_save_data,
|
||||
FileSys::SaveDataSpaceId space,
|
||||
const FileSys::SaveDataAttribute& save_struct) const {
|
||||
LOG_TRACE(Service_FS, "Creating Save Data for space_id={:01X}, save_struct={}", space,
|
||||
save_struct.DebugInfo());
|
||||
|
||||
if (save_data_factory == nullptr) {
|
||||
return FileSys::ERROR_ENTITY_NOT_FOUND;
|
||||
}
|
||||
|
||||
auto save_data = save_data_factory->Create(space, save_struct);
|
||||
if (save_data == nullptr) {
|
||||
return FileSys::ERROR_ENTITY_NOT_FOUND;
|
||||
}
|
||||
|
||||
*out_save_data = save_data;
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
Result FileSystemController::OpenSaveData(FileSys::VirtualDir* out_save_data,
|
||||
FileSys::SaveDataSpaceId space,
|
||||
const FileSys::SaveDataAttribute& attribute) const {
|
||||
LOG_TRACE(Service_FS, "Opening Save Data for space_id={:01X}, save_struct={}", space,
|
||||
attribute.DebugInfo());
|
||||
|
||||
if (save_data_factory == nullptr) {
|
||||
return FileSys::ERROR_ENTITY_NOT_FOUND;
|
||||
}
|
||||
|
||||
auto save_data = save_data_factory->Open(space, attribute);
|
||||
if (save_data == nullptr) {
|
||||
return FileSys::ERROR_ENTITY_NOT_FOUND;
|
||||
}
|
||||
|
||||
*out_save_data = save_data;
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
Result FileSystemController::OpenSaveDataSpace(FileSys::VirtualDir* out_save_data_space,
|
||||
FileSys::SaveDataSpaceId space) const {
|
||||
LOG_TRACE(Service_FS, "Opening Save Data Space for space_id={:01X}", space);
|
||||
|
||||
if (save_data_factory == nullptr) {
|
||||
return FileSys::ERROR_ENTITY_NOT_FOUND;
|
||||
}
|
||||
|
||||
auto save_data_space = save_data_factory->GetSaveDataSpaceDirectory(space);
|
||||
if (save_data_space == nullptr) {
|
||||
return FileSys::ERROR_ENTITY_NOT_FOUND;
|
||||
}
|
||||
|
||||
*out_save_data_space = save_data_space;
|
||||
return ResultSuccess;
|
||||
auto vfs = system.GetFilesystem();
|
||||
const auto nand_directory =
|
||||
vfs->OpenDirectory(Common::FS::GetYuzuPathString(YuzuPath::NANDDir), rw_mode);
|
||||
return std::make_shared<FileSys::SaveDataFactory>(system, program_id,
|
||||
std::move(nand_directory));
|
||||
}
|
||||
|
||||
Result FileSystemController::OpenSDMC(FileSys::VirtualDir* out_sdmc) const {
|
||||
@ -540,48 +458,6 @@ u64 FileSystemController::GetTotalSpaceSize(FileSys::StorageId id) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
FileSys::SaveDataSize FileSystemController::ReadSaveDataSize(FileSys::SaveDataType type,
|
||||
u64 title_id, u128 user_id) const {
|
||||
if (save_data_factory == nullptr) {
|
||||
return {0, 0};
|
||||
}
|
||||
|
||||
const auto value = save_data_factory->ReadSaveDataSize(type, title_id, user_id);
|
||||
|
||||
if (value.normal == 0 && value.journal == 0) {
|
||||
FileSys::SaveDataSize new_size{SUFFICIENT_SAVE_DATA_SIZE, SUFFICIENT_SAVE_DATA_SIZE};
|
||||
|
||||
FileSys::NACP nacp;
|
||||
const auto res = system.GetAppLoader().ReadControlData(nacp);
|
||||
|
||||
if (res != Loader::ResultStatus::Success) {
|
||||
const FileSys::PatchManager pm{system.GetApplicationProcessProgramID(),
|
||||
system.GetFileSystemController(),
|
||||
system.GetContentProvider()};
|
||||
const auto metadata = pm.GetControlMetadata();
|
||||
const auto& nacp_unique = metadata.first;
|
||||
|
||||
if (nacp_unique != nullptr) {
|
||||
new_size = {nacp_unique->GetDefaultNormalSaveSize(),
|
||||
nacp_unique->GetDefaultJournalSaveSize()};
|
||||
}
|
||||
} else {
|
||||
new_size = {nacp.GetDefaultNormalSaveSize(), nacp.GetDefaultJournalSaveSize()};
|
||||
}
|
||||
|
||||
WriteSaveDataSize(type, title_id, user_id, new_size);
|
||||
return new_size;
|
||||
}
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
void FileSystemController::WriteSaveDataSize(FileSys::SaveDataType type, u64 title_id, u128 user_id,
|
||||
FileSys::SaveDataSize new_value) const {
|
||||
if (save_data_factory != nullptr)
|
||||
save_data_factory->WriteSaveDataSize(type, title_id, user_id, new_value);
|
||||
}
|
||||
|
||||
void FileSystemController::SetGameCard(FileSys::VirtualFile file) {
|
||||
gamecard = std::make_unique<FileSys::XCI>(file);
|
||||
const auto dir = gamecard->ConcatenatedPseudoDirectory();
|
||||
@ -801,14 +677,9 @@ FileSys::VirtualDir FileSystemController::GetBCATDirectory(u64 title_id) const {
|
||||
return bis_factory->GetBCATDirectory(title_id);
|
||||
}
|
||||
|
||||
void FileSystemController::SetAutoSaveDataCreation(bool enable) {
|
||||
save_data_factory->SetAutoCreate(enable);
|
||||
}
|
||||
|
||||
void FileSystemController::CreateFactories(FileSys::VfsFilesystem& vfs, bool overwrite) {
|
||||
if (overwrite) {
|
||||
bis_factory = nullptr;
|
||||
save_data_factory = nullptr;
|
||||
sdmc_factory = nullptr;
|
||||
}
|
||||
|
||||
@ -836,11 +707,6 @@ void FileSystemController::CreateFactories(FileSys::VfsFilesystem& vfs, bool ove
|
||||
bis_factory->GetUserNANDContents());
|
||||
}
|
||||
|
||||
if (save_data_factory == nullptr) {
|
||||
save_data_factory =
|
||||
std::make_unique<FileSys::SaveDataFactory>(system, std::move(nand_directory));
|
||||
}
|
||||
|
||||
if (sdmc_factory == nullptr) {
|
||||
sdmc_factory = std::make_unique<FileSys::SDMCFactory>(std::move(sd_directory),
|
||||
std::move(sd_load_directory));
|
||||
@ -849,12 +715,19 @@ void FileSystemController::CreateFactories(FileSys::VfsFilesystem& vfs, bool ove
|
||||
}
|
||||
}
|
||||
|
||||
void FileSystemController::Reset() {
|
||||
std::scoped_lock lk{registration_lock};
|
||||
registrations.clear();
|
||||
}
|
||||
|
||||
void LoopProcess(Core::System& system) {
|
||||
auto server_manager = std::make_unique<ServerManager>(system);
|
||||
|
||||
const auto FileSystemProxyFactory = [&] { return std::make_shared<FSP_SRV>(system); };
|
||||
|
||||
server_manager->RegisterNamedService("fsp-ldr", std::make_shared<FSP_LDR>(system));
|
||||
server_manager->RegisterNamedService("fsp:pr", std::make_shared<FSP_PR>(system));
|
||||
server_manager->RegisterNamedService("fsp-srv", std::make_shared<FSP_SRV>(system));
|
||||
server_manager->RegisterNamedService("fsp-srv", std::move(FileSystemProxyFactory));
|
||||
ServerManager::RunServer(std::move(server_manager));
|
||||
}
|
||||
|
||||
|
@ -43,6 +43,9 @@ class ServiceManager;
|
||||
|
||||
namespace FileSystem {
|
||||
|
||||
class RomFsController;
|
||||
class SaveDataController;
|
||||
|
||||
enum class ContentStorageId : u32 {
|
||||
System,
|
||||
User,
|
||||
@ -61,32 +64,24 @@ enum class OpenDirectoryMode : u64 {
|
||||
};
|
||||
DECLARE_ENUM_FLAG_OPERATORS(OpenDirectoryMode);
|
||||
|
||||
using ProcessId = u64;
|
||||
using ProgramId = u64;
|
||||
|
||||
class FileSystemController {
|
||||
public:
|
||||
explicit FileSystemController(Core::System& system_);
|
||||
~FileSystemController();
|
||||
|
||||
Result RegisterRomFS(std::unique_ptr<FileSys::RomFSFactory>&& factory);
|
||||
Result RegisterSaveData(std::unique_ptr<FileSys::SaveDataFactory>&& factory);
|
||||
Result RegisterSDMC(std::unique_ptr<FileSys::SDMCFactory>&& factory);
|
||||
Result RegisterBIS(std::unique_ptr<FileSys::BISFactory>&& factory);
|
||||
Result RegisterProcess(ProcessId process_id, ProgramId program_id,
|
||||
std::shared_ptr<FileSys::RomFSFactory>&& factory);
|
||||
Result OpenProcess(ProgramId* out_program_id,
|
||||
std::shared_ptr<SaveDataController>* out_save_data_controller,
|
||||
std::shared_ptr<RomFsController>* out_romfs_controller,
|
||||
ProcessId process_id);
|
||||
void SetPackedUpdate(ProcessId process_id, FileSys::VirtualFile update_raw);
|
||||
|
||||
void SetPackedUpdate(FileSys::VirtualFile update_raw);
|
||||
FileSys::VirtualFile OpenRomFSCurrentProcess() const;
|
||||
FileSys::VirtualFile OpenPatchedRomFS(u64 title_id, FileSys::ContentRecordType type) const;
|
||||
FileSys::VirtualFile OpenPatchedRomFSWithProgramIndex(u64 title_id, u8 program_index,
|
||||
FileSys::ContentRecordType type) const;
|
||||
FileSys::VirtualFile OpenRomFS(u64 title_id, FileSys::StorageId storage_id,
|
||||
FileSys::ContentRecordType type) const;
|
||||
std::shared_ptr<FileSys::NCA> OpenBaseNca(u64 title_id, FileSys::StorageId storage_id,
|
||||
FileSys::ContentRecordType type) const;
|
||||
std::shared_ptr<SaveDataController> OpenSaveDataController();
|
||||
|
||||
Result CreateSaveData(FileSys::VirtualDir* out_save_data, FileSys::SaveDataSpaceId space,
|
||||
const FileSys::SaveDataAttribute& save_struct) const;
|
||||
Result OpenSaveData(FileSys::VirtualDir* out_save_data, FileSys::SaveDataSpaceId space,
|
||||
const FileSys::SaveDataAttribute& save_struct) const;
|
||||
Result OpenSaveDataSpace(FileSys::VirtualDir* out_save_data_space,
|
||||
FileSys::SaveDataSpaceId space) const;
|
||||
Result OpenSDMC(FileSys::VirtualDir* out_sdmc) const;
|
||||
Result OpenBISPartition(FileSys::VirtualDir* out_bis_partition,
|
||||
FileSys::BisPartitionId id) const;
|
||||
@ -96,11 +91,6 @@ public:
|
||||
u64 GetFreeSpaceSize(FileSys::StorageId id) const;
|
||||
u64 GetTotalSpaceSize(FileSys::StorageId id) const;
|
||||
|
||||
FileSys::SaveDataSize ReadSaveDataSize(FileSys::SaveDataType type, u64 title_id,
|
||||
u128 user_id) const;
|
||||
void WriteSaveDataSize(FileSys::SaveDataType type, u64 title_id, u128 user_id,
|
||||
FileSys::SaveDataSize new_value) const;
|
||||
|
||||
void SetGameCard(FileSys::VirtualFile file);
|
||||
FileSys::XCI* GetGameCard() const;
|
||||
|
||||
@ -133,15 +123,24 @@ public:
|
||||
|
||||
FileSys::VirtualDir GetBCATDirectory(u64 title_id) const;
|
||||
|
||||
void SetAutoSaveDataCreation(bool enable);
|
||||
|
||||
// Creates the SaveData, SDMC, and BIS Factories. Should be called once and before any function
|
||||
// above is called.
|
||||
void CreateFactories(FileSys::VfsFilesystem& vfs, bool overwrite = true);
|
||||
|
||||
void Reset();
|
||||
|
||||
private:
|
||||
std::unique_ptr<FileSys::RomFSFactory> romfs_factory;
|
||||
std::unique_ptr<FileSys::SaveDataFactory> save_data_factory;
|
||||
std::shared_ptr<FileSys::SaveDataFactory> CreateSaveDataFactory(ProgramId program_id);
|
||||
|
||||
struct Registration {
|
||||
ProgramId program_id;
|
||||
std::shared_ptr<FileSys::RomFSFactory> romfs_factory;
|
||||
std::shared_ptr<FileSys::SaveDataFactory> save_data_factory;
|
||||
};
|
||||
|
||||
std::mutex registration_lock;
|
||||
std::map<ProcessId, Registration> registrations;
|
||||
|
||||
std::unique_ptr<FileSys::SDMCFactory> sdmc_factory;
|
||||
std::unique_ptr<FileSys::BISFactory> bis_factory;
|
||||
|
||||
|
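A minimal sketch (not part of the diff) of how the reworked per-process interface above fits together: the loader registers a process with its RomFS factory, and fsp-srv later resolves that registration by process id. The helper name `RegisterAndOpen` and the `Example` namespace are placeholders; the member signatures match the declarations shown above.

// Illustrative only: driving FileSystemController's new registration API.
#include "core/hle/service/filesystem/filesystem.h"
#include "core/hle/service/filesystem/romfs_controller.h"
#include "core/hle/service/filesystem/save_data_controller.h"

namespace Example {

using namespace Service::FileSystem;

void RegisterAndOpen(FileSystemController& fsc, ProcessId pid, ProgramId program_id,
                     std::shared_ptr<FileSys::RomFSFactory> romfs_factory) {
    // At load time the process is registered with its RomFS factory; the
    // controller builds the matching SaveDataFactory internally.
    fsc.RegisterProcess(pid, program_id, std::move(romfs_factory));

    // Later, fsp-srv looks the registration up by process id and receives
    // per-process controllers instead of touching global factories.
    ProgramId out_program_id{};
    std::shared_ptr<SaveDataController> save_data;
    std::shared_ptr<RomFsController> romfs;
    if (fsc.OpenProcess(&out_program_id, &save_data, &romfs, pid) != ResultSuccess) {
        return; // the process was never registered
    }
}

} // namespace Example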
@ -27,6 +27,8 @@
|
||||
#include "core/hle/result.h"
|
||||
#include "core/hle/service/filesystem/filesystem.h"
|
||||
#include "core/hle/service/filesystem/fsp_srv.h"
|
||||
#include "core/hle/service/filesystem/romfs_controller.h"
|
||||
#include "core/hle/service/filesystem/save_data_controller.h"
|
||||
#include "core/hle/service/hle_ipc.h"
|
||||
#include "core/hle/service/ipc_helpers.h"
|
||||
#include "core/reporter.h"
|
||||
@ -577,9 +579,11 @@ private:
|
||||
|
||||
class ISaveDataInfoReader final : public ServiceFramework<ISaveDataInfoReader> {
|
||||
public:
|
||||
explicit ISaveDataInfoReader(Core::System& system_, FileSys::SaveDataSpaceId space,
|
||||
FileSystemController& fsc_)
|
||||
: ServiceFramework{system_, "ISaveDataInfoReader"}, fsc{fsc_} {
|
||||
explicit ISaveDataInfoReader(Core::System& system_,
|
||||
std::shared_ptr<SaveDataController> save_data_controller_,
|
||||
FileSys::SaveDataSpaceId space)
|
||||
: ServiceFramework{system_, "ISaveDataInfoReader"}, save_data_controller{
|
||||
save_data_controller_} {
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, &ISaveDataInfoReader::ReadSaveDataInfo, "ReadSaveDataInfo"},
|
||||
};
|
||||
@ -626,7 +630,7 @@ private:
|
||||
|
||||
void FindAllSaves(FileSys::SaveDataSpaceId space) {
|
||||
FileSys::VirtualDir save_root{};
|
||||
const auto result = fsc.OpenSaveDataSpace(&save_root, space);
|
||||
const auto result = save_data_controller->OpenSaveDataSpace(&save_root, space);
|
||||
|
||||
if (result != ResultSuccess || save_root == nullptr) {
|
||||
LOG_ERROR(Service_FS, "The save root for the space_id={:02X} was invalid!", space);
|
||||
@ -723,7 +727,8 @@ private:
|
||||
};
|
||||
static_assert(sizeof(SaveDataInfo) == 0x60, "SaveDataInfo has incorrect size.");
|
||||
|
||||
FileSystemController& fsc;
|
||||
ProcessId process_id = 0;
|
||||
std::shared_ptr<SaveDataController> save_data_controller;
|
||||
std::vector<SaveDataInfo> info;
|
||||
u64 next_entry_index = 0;
|
||||
};
|
||||
@ -863,21 +868,20 @@ FSP_SRV::FSP_SRV(Core::System& system_)
|
||||
if (Settings::values.enable_fs_access_log) {
|
||||
access_log_mode = AccessLogMode::SdCard;
|
||||
}
|
||||
|
||||
// This should be true on creation
|
||||
fsc.SetAutoSaveDataCreation(true);
|
||||
}
|
||||
|
||||
FSP_SRV::~FSP_SRV() = default;
|
||||
|
||||
void FSP_SRV::SetCurrentProcess(HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
current_process_id = rp.Pop<u64>();
|
||||
current_process_id = ctx.GetPID();
|
||||
|
||||
LOG_DEBUG(Service_FS, "called. current_process_id=0x{:016X}", current_process_id);
|
||||
|
||||
const auto res =
|
||||
fsc.OpenProcess(&program_id, &save_data_controller, &romfs_controller, current_process_id);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ResultSuccess);
|
||||
rb.Push(res);
|
||||
}
|
||||
|
||||
void FSP_SRV::OpenFileSystemWithPatch(HLERequestContext& ctx) {
|
||||
@ -916,7 +920,8 @@ void FSP_SRV::CreateSaveDataFileSystem(HLERequestContext& ctx) {
|
||||
uid[1], uid[0]);
|
||||
|
||||
FileSys::VirtualDir save_data_dir{};
|
||||
fsc.CreateSaveData(&save_data_dir, FileSys::SaveDataSpaceId::NandUser, save_struct);
|
||||
save_data_controller->CreateSaveData(&save_data_dir, FileSys::SaveDataSpaceId::NandUser,
|
||||
save_struct);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ResultSuccess);
|
||||
@ -931,7 +936,8 @@ void FSP_SRV::CreateSaveDataFileSystemBySystemSaveDataId(HLERequestContext& ctx)
|
||||
LOG_DEBUG(Service_FS, "called save_struct = {}", save_struct.DebugInfo());
|
||||
|
||||
FileSys::VirtualDir save_data_dir{};
|
||||
fsc.CreateSaveData(&save_data_dir, FileSys::SaveDataSpaceId::NandSystem, save_struct);
|
||||
save_data_controller->CreateSaveData(&save_data_dir, FileSys::SaveDataSpaceId::NandSystem,
|
||||
save_struct);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ResultSuccess);
|
||||
@ -950,7 +956,8 @@ void FSP_SRV::OpenSaveDataFileSystem(HLERequestContext& ctx) {
|
||||
LOG_INFO(Service_FS, "called.");
|
||||
|
||||
FileSys::VirtualDir dir{};
|
||||
auto result = fsc.OpenSaveData(&dir, parameters.space_id, parameters.attribute);
|
||||
auto result =
|
||||
save_data_controller->OpenSaveData(&dir, parameters.space_id, parameters.attribute);
|
||||
if (result != ResultSuccess) {
|
||||
IPC::ResponseBuilder rb{ctx, 2, 0, 0};
|
||||
rb.Push(FileSys::ERROR_ENTITY_NOT_FOUND);
|
||||
@ -1001,7 +1008,7 @@ void FSP_SRV::OpenSaveDataInfoReaderBySaveDataSpaceId(HLERequestContext& ctx) {
|
||||
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
|
||||
rb.Push(ResultSuccess);
|
||||
rb.PushIpcInterface<ISaveDataInfoReader>(
|
||||
std::make_shared<ISaveDataInfoReader>(system, space, fsc));
|
||||
std::make_shared<ISaveDataInfoReader>(system, save_data_controller, space));
|
||||
}
|
||||
|
||||
void FSP_SRV::OpenSaveDataInfoReaderOnlyCacheStorage(HLERequestContext& ctx) {
|
||||
@ -1009,8 +1016,8 @@ void FSP_SRV::OpenSaveDataInfoReaderOnlyCacheStorage(HLERequestContext& ctx) {
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
|
||||
rb.Push(ResultSuccess);
|
||||
rb.PushIpcInterface<ISaveDataInfoReader>(system, FileSys::SaveDataSpaceId::TemporaryStorage,
|
||||
fsc);
|
||||
rb.PushIpcInterface<ISaveDataInfoReader>(system, save_data_controller,
|
||||
FileSys::SaveDataSpaceId::TemporaryStorage);
|
||||
}
|
||||
|
||||
void FSP_SRV::WriteSaveDataFileSystemExtraDataBySaveDataAttribute(HLERequestContext& ctx) {
|
||||
@ -1050,7 +1057,7 @@ void FSP_SRV::OpenDataStorageByCurrentProcess(HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_FS, "called");
|
||||
|
||||
if (!romfs) {
|
||||
auto current_romfs = fsc.OpenRomFSCurrentProcess();
|
||||
auto current_romfs = romfs_controller->OpenRomFSCurrentProcess();
|
||||
if (!current_romfs) {
|
||||
// TODO (bunnei): Find the right error code to use here
|
||||
LOG_CRITICAL(Service_FS, "no file system interface available!");
|
||||
@ -1078,7 +1085,7 @@ void FSP_SRV::OpenDataStorageByDataId(HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_FS, "called with storage_id={:02X}, unknown={:08X}, title_id={:016X}",
|
||||
storage_id, unknown, title_id);
|
||||
|
||||
auto data = fsc.OpenRomFS(title_id, storage_id, FileSys::ContentRecordType::Data);
|
||||
auto data = romfs_controller->OpenRomFS(title_id, storage_id, FileSys::ContentRecordType::Data);
|
||||
|
||||
if (!data) {
|
||||
const auto archive = FileSys::SystemArchive::SynthesizeSystemArchive(title_id);
|
||||
@ -1101,7 +1108,8 @@ void FSP_SRV::OpenDataStorageByDataId(HLERequestContext& ctx) {
|
||||
|
||||
const FileSys::PatchManager pm{title_id, fsc, content_provider};
|
||||
|
||||
auto base = fsc.OpenBaseNca(title_id, storage_id, FileSys::ContentRecordType::Data);
|
||||
auto base =
|
||||
romfs_controller->OpenBaseNca(title_id, storage_id, FileSys::ContentRecordType::Data);
|
||||
auto storage = std::make_shared<IStorage>(
|
||||
system, pm.PatchRomFS(base.get(), std::move(data), FileSys::ContentRecordType::Data));
|
||||
|
||||
@ -1129,9 +1137,8 @@ void FSP_SRV::OpenDataStorageWithProgramIndex(HLERequestContext& ctx) {
|
||||
|
||||
LOG_DEBUG(Service_FS, "called, program_index={}", program_index);
|
||||
|
||||
auto patched_romfs =
|
||||
fsc.OpenPatchedRomFSWithProgramIndex(system.GetApplicationProcessProgramID(), program_index,
|
||||
FileSys::ContentRecordType::Program);
|
||||
auto patched_romfs = romfs_controller->OpenPatchedRomFSWithProgramIndex(
|
||||
program_id, program_index, FileSys::ContentRecordType::Program);
|
||||
|
||||
if (!patched_romfs) {
|
||||
// TODO: Find the right error code to use here
|
||||
@ -1152,7 +1159,7 @@ void FSP_SRV::OpenDataStorageWithProgramIndex(HLERequestContext& ctx) {
|
||||
void FSP_SRV::DisableAutoSaveDataCreation(HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_FS, "called");
|
||||
|
||||
fsc.SetAutoSaveDataCreation(false);
|
||||
save_data_controller->SetAutoCreate(false);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ResultSuccess);
|
||||
|
@ -17,6 +17,9 @@ class FileSystemBackend;
|
||||
|
||||
namespace Service::FileSystem {
|
||||
|
||||
class RomFsController;
|
||||
class SaveDataController;
|
||||
|
||||
enum class AccessLogVersion : u32 {
|
||||
V7_0_0 = 2,
|
||||
|
||||
@ -67,6 +70,9 @@ private:
|
||||
u64 current_process_id = 0;
|
||||
u32 access_log_program_index = 0;
|
||||
AccessLogMode access_log_mode = AccessLogMode::None;
|
||||
u64 program_id = 0;
|
||||
std::shared_ptr<SaveDataController> save_data_controller;
|
||||
std::shared_ptr<RomFsController> romfs_controller;
|
||||
};
|
||||
|
||||
} // namespace Service::FileSystem
|
||||
|
src/core/hle/service/filesystem/romfs_controller.cpp (new file, 37 lines)
@ -0,0 +1,37 @@
// SPDX-FileCopyrightText: Copyright 2024 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "core/hle/service/filesystem/romfs_controller.h"

namespace Service::FileSystem {

RomFsController::RomFsController(std::shared_ptr<FileSys::RomFSFactory> factory_, u64 program_id_)
    : factory{std::move(factory_)}, program_id{program_id_} {}
RomFsController::~RomFsController() = default;

FileSys::VirtualFile RomFsController::OpenRomFSCurrentProcess() {
    return factory->OpenCurrentProcess(program_id);
}

FileSys::VirtualFile RomFsController::OpenPatchedRomFS(u64 title_id,
                                                       FileSys::ContentRecordType type) {
    return factory->OpenPatchedRomFS(title_id, type);
}

FileSys::VirtualFile RomFsController::OpenPatchedRomFSWithProgramIndex(
    u64 title_id, u8 program_index, FileSys::ContentRecordType type) {
    return factory->OpenPatchedRomFSWithProgramIndex(title_id, program_index, type);
}

FileSys::VirtualFile RomFsController::OpenRomFS(u64 title_id, FileSys::StorageId storage_id,
                                                FileSys::ContentRecordType type) {
    return factory->Open(title_id, storage_id, type);
}

std::shared_ptr<FileSys::NCA> RomFsController::OpenBaseNca(u64 title_id,
                                                           FileSys::StorageId storage_id,
                                                           FileSys::ContentRecordType type) {
    return factory->GetEntry(title_id, storage_id, type);
}

} // namespace Service::FileSystem
src/core/hle/service/filesystem/romfs_controller.h (new file, 31 lines)
@ -0,0 +1,31 @@
// SPDX-FileCopyrightText: Copyright 2024 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include "core/file_sys/nca_metadata.h"
#include "core/file_sys/romfs_factory.h"
#include "core/file_sys/vfs_types.h"

namespace Service::FileSystem {

class RomFsController {
public:
    explicit RomFsController(std::shared_ptr<FileSys::RomFSFactory> factory_, u64 program_id_);
    ~RomFsController();

    FileSys::VirtualFile OpenRomFSCurrentProcess();
    FileSys::VirtualFile OpenPatchedRomFS(u64 title_id, FileSys::ContentRecordType type);
    FileSys::VirtualFile OpenPatchedRomFSWithProgramIndex(u64 title_id, u8 program_index,
                                                          FileSys::ContentRecordType type);
    FileSys::VirtualFile OpenRomFS(u64 title_id, FileSys::StorageId storage_id,
                                   FileSys::ContentRecordType type);
    std::shared_ptr<FileSys::NCA> OpenBaseNca(u64 title_id, FileSys::StorageId storage_id,
                                              FileSys::ContentRecordType type);

private:
    const std::shared_ptr<FileSys::RomFSFactory> factory;
    const u64 program_id;
};

} // namespace Service::FileSystem
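A minimal usage sketch (not part of the diff): RomFsController wraps a single process's RomFSFactory, so the program id is fixed at construction and callers no longer pass the application program id on every open. The function and namespace names below are hypothetical.

#include "core/hle/service/filesystem/romfs_controller.h"

namespace Example {

FileSys::VirtualFile OpenCurrentRomFS(std::shared_ptr<FileSys::RomFSFactory> factory,
                                      u64 program_id) {
    Service::FileSystem::RomFsController controller{std::move(factory), program_id};
    // May return nullptr if the factory cannot open this process's RomFS.
    return controller.OpenRomFSCurrentProcess();
}

} // namespace Example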
src/core/hle/service/filesystem/save_data_controller.cpp (new file, 99 lines)
@ -0,0 +1,99 @@
// SPDX-FileCopyrightText: Copyright 2024 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "core/core.h"
#include "core/file_sys/control_metadata.h"
#include "core/file_sys/errors.h"
#include "core/file_sys/patch_manager.h"
#include "core/hle/service/filesystem/save_data_controller.h"
#include "core/loader/loader.h"

namespace Service::FileSystem {

namespace {

// A default size for normal/journal save data size if application control metadata cannot be found.
// This should be large enough to satisfy even the most extreme requirements (~4.2GB)
constexpr u64 SufficientSaveDataSize = 0xF0000000;

FileSys::SaveDataSize GetDefaultSaveDataSize(Core::System& system, u64 program_id) {
    const FileSys::PatchManager pm{program_id, system.GetFileSystemController(),
                                   system.GetContentProvider()};
    const auto metadata = pm.GetControlMetadata();
    const auto& nacp = metadata.first;

    if (nacp != nullptr) {
        return {nacp->GetDefaultNormalSaveSize(), nacp->GetDefaultJournalSaveSize()};
    }

    return {SufficientSaveDataSize, SufficientSaveDataSize};
}

} // namespace

SaveDataController::SaveDataController(Core::System& system_,
                                       std::shared_ptr<FileSys::SaveDataFactory> factory_)
    : system{system_}, factory{std::move(factory_)} {}
SaveDataController::~SaveDataController() = default;

Result SaveDataController::CreateSaveData(FileSys::VirtualDir* out_save_data,
                                          FileSys::SaveDataSpaceId space,
                                          const FileSys::SaveDataAttribute& attribute) {
    LOG_TRACE(Service_FS, "Creating Save Data for space_id={:01X}, save_struct={}", space,
              attribute.DebugInfo());

    auto save_data = factory->Create(space, attribute);
    if (save_data == nullptr) {
        return FileSys::ERROR_ENTITY_NOT_FOUND;
    }

    *out_save_data = save_data;
    return ResultSuccess;
}

Result SaveDataController::OpenSaveData(FileSys::VirtualDir* out_save_data,
                                        FileSys::SaveDataSpaceId space,
                                        const FileSys::SaveDataAttribute& attribute) {
    auto save_data = factory->Open(space, attribute);
    if (save_data == nullptr) {
        return FileSys::ERROR_ENTITY_NOT_FOUND;
    }

    *out_save_data = save_data;
    return ResultSuccess;
}

Result SaveDataController::OpenSaveDataSpace(FileSys::VirtualDir* out_save_data_space,
                                             FileSys::SaveDataSpaceId space) {
    auto save_data_space = factory->GetSaveDataSpaceDirectory(space);
    if (save_data_space == nullptr) {
        return FileSys::ERROR_ENTITY_NOT_FOUND;
    }

    *out_save_data_space = save_data_space;
    return ResultSuccess;
}

FileSys::SaveDataSize SaveDataController::ReadSaveDataSize(FileSys::SaveDataType type, u64 title_id,
                                                           u128 user_id) {
    const auto value = factory->ReadSaveDataSize(type, title_id, user_id);

    if (value.normal == 0 && value.journal == 0) {
        const auto size = GetDefaultSaveDataSize(system, title_id);
        factory->WriteSaveDataSize(type, title_id, user_id, size);
        return size;
    }

    return value;
}

void SaveDataController::WriteSaveDataSize(FileSys::SaveDataType type, u64 title_id, u128 user_id,
                                           FileSys::SaveDataSize new_value) {
    factory->WriteSaveDataSize(type, title_id, user_id, new_value);
}

void SaveDataController::SetAutoCreate(bool state) {
    factory->SetAutoCreate(state);
}

} // namespace Service::FileSystem
src/core/hle/service/filesystem/save_data_controller.h (new file, 35 lines)
@ -0,0 +1,35 @@
// SPDX-FileCopyrightText: Copyright 2024 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include "core/file_sys/nca_metadata.h"
#include "core/file_sys/savedata_factory.h"
#include "core/file_sys/vfs_types.h"

namespace Service::FileSystem {

class SaveDataController {
public:
    explicit SaveDataController(Core::System& system,
                                std::shared_ptr<FileSys::SaveDataFactory> factory_);
    ~SaveDataController();

    Result CreateSaveData(FileSys::VirtualDir* out_save_data, FileSys::SaveDataSpaceId space,
                          const FileSys::SaveDataAttribute& attribute);
    Result OpenSaveData(FileSys::VirtualDir* out_save_data, FileSys::SaveDataSpaceId space,
                        const FileSys::SaveDataAttribute& attribute);
    Result OpenSaveDataSpace(FileSys::VirtualDir* out_save_data_space,
                             FileSys::SaveDataSpaceId space);

    FileSys::SaveDataSize ReadSaveDataSize(FileSys::SaveDataType type, u64 title_id, u128 user_id);
    void WriteSaveDataSize(FileSys::SaveDataType type, u64 title_id, u128 user_id,
                           FileSys::SaveDataSize new_value);
    void SetAutoCreate(bool state);

private:
    Core::System& system;
    const std::shared_ptr<FileSys::SaveDataFactory> factory;
};

} // namespace Service::FileSystem
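A minimal usage sketch (not part of the diff), assuming an existing Core::System and SaveDataFactory: opening user save data through the new controller mirrors what FSP_SRV::OpenSaveDataFileSystem does in the hunks above. The function name and namespace are hypothetical.

#include "core/hle/service/filesystem/save_data_controller.h"

namespace Example {

Result OpenUserSave(Core::System& system,
                    std::shared_ptr<FileSys::SaveDataFactory> factory,
                    const FileSys::SaveDataAttribute& attribute,
                    FileSys::VirtualDir* out_dir) {
    Service::FileSystem::SaveDataController controller{system, std::move(factory)};
    // Forwards to SaveDataFactory::Open and maps a missing directory to
    // FileSys::ERROR_ENTITY_NOT_FOUND, as in the implementation above.
    return controller.OpenSaveData(out_dir, FileSys::SaveDataSpaceId::NandUser, attribute);
}

} // namespace Example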
@ -15,9 +15,10 @@
|
||||
namespace Service::Glue {
|
||||
|
||||
namespace {
|
||||
std::optional<u64> GetTitleIDForProcessID(const Core::System& system, u64 process_id) {
|
||||
const auto& list = system.Kernel().GetProcessList();
|
||||
const auto iter = std::find_if(list.begin(), list.end(), [&process_id](const auto& process) {
|
||||
std::optional<u64> GetTitleIDForProcessID(Core::System& system, u64 process_id) {
|
||||
auto list = system.Kernel().GetProcessList();
|
||||
|
||||
const auto iter = std::find_if(list.begin(), list.end(), [&process_id](auto& process) {
|
||||
return process->GetProcessId() == process_id;
|
||||
});
|
||||
|
||||
|
@ -22,12 +22,10 @@ void LoopProcess(Core::System& system) {
|
||||
std::shared_ptr<HidFirmwareSettings> firmware_settings =
|
||||
std::make_shared<HidFirmwareSettings>();
|
||||
|
||||
// TODO: Remove this hack until this service is emulated properly.
|
||||
const auto process_list = system.Kernel().GetProcessList();
|
||||
if (!process_list.empty()) {
|
||||
// TODO: Remove this hack when am is emulated properly.
|
||||
resource_manager->Initialize();
|
||||
resource_manager->RegisterAppletResourceUserId(process_list[0]->GetId(), true);
|
||||
}
|
||||
resource_manager->RegisterAppletResourceUserId(system.ApplicationProcess()->GetProcessId(),
|
||||
true);
|
||||
|
||||
server_manager->RegisterNamedService(
|
||||
"hid", std::make_shared<IHidServer>(system, resource_manager, firmware_settings));
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/scratch_buffer.h"
|
||||
#include "core/guest_memory.h"
|
||||
#include "core/hle/kernel/k_auto_object.h"
|
||||
#include "core/hle/kernel/k_handle_table.h"
|
||||
#include "core/hle/kernel/k_process.h"
|
||||
@ -344,9 +345,9 @@ std::vector<u8> HLERequestContext::ReadBufferCopy(std::size_t buffer_index) cons
|
||||
|
||||
std::span<const u8> HLERequestContext::ReadBufferA(std::size_t buffer_index) const {
|
||||
static thread_local std::array read_buffer_a{
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead>(memory, 0, 0),
|
||||
};
|
||||
|
||||
ASSERT_OR_EXECUTE_MSG(
|
||||
@ -360,9 +361,9 @@ std::span<const u8> HLERequestContext::ReadBufferA(std::size_t buffer_index) con
|
||||
|
||||
std::span<const u8> HLERequestContext::ReadBufferX(std::size_t buffer_index) const {
|
||||
static thread_local std::array read_buffer_x{
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead>(memory, 0, 0),
|
||||
};
|
||||
|
||||
ASSERT_OR_EXECUTE_MSG(
|
||||
@ -376,14 +377,14 @@ std::span<const u8> HLERequestContext::ReadBufferX(std::size_t buffer_index) con
|
||||
|
||||
std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
|
||||
static thread_local std::array read_buffer_a{
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead>(memory, 0, 0),
|
||||
};
|
||||
static thread_local std::array read_buffer_x{
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead>(memory, 0, 0),
|
||||
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead>(memory, 0, 0),
|
||||
};
|
||||
|
||||
const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
|
||||
|
@ -41,6 +41,8 @@ class KernelCore;
|
||||
class KHandleTable;
|
||||
class KProcess;
|
||||
class KServerSession;
|
||||
template <typename T>
|
||||
class KScopedAutoObject;
|
||||
class KThread;
|
||||
} // namespace Kernel
|
||||
|
||||
|
@ -2,27 +2,134 @@
|
||||
// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
#include <atomic>
|
||||
#include <deque>
|
||||
#include <mutex>
|
||||
|
||||
#include "core/hle/kernel/k_process.h"
|
||||
#include "core/hle/service/nvdrv/core/container.h"
|
||||
#include "core/hle/service/nvdrv/core/heap_mapper.h"
|
||||
#include "core/hle/service/nvdrv/core/nvmap.h"
|
||||
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
|
||||
#include "core/memory.h"
|
||||
#include "video_core/host1x/host1x.h"
|
||||
|
||||
namespace Service::Nvidia::NvCore {
|
||||
|
||||
Session::Session(size_t id_, Kernel::KProcess* process_, size_t smmu_id_)
|
||||
: id{id_}, process{process_}, smmu_id{smmu_id_},
|
||||
has_preallocated_area{}, mapper{}, is_active{} {}
|
||||
|
||||
Session::~Session() = default;
|
||||
|
||||
struct ContainerImpl {
|
||||
explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_)
|
||||
: file{host1x_}, manager{host1x_}, device_file_data{} {}
|
||||
explicit ContainerImpl(Container& core, Tegra::Host1x::Host1x& host1x_)
|
||||
: host1x{host1x_}, file{core, host1x_}, manager{host1x_}, device_file_data{} {}
|
||||
Tegra::Host1x::Host1x& host1x;
|
||||
NvMap file;
|
||||
SyncpointManager manager;
|
||||
Container::Host1xDeviceFileData device_file_data;
|
||||
std::deque<Session> sessions;
|
||||
size_t new_ids{};
|
||||
std::deque<size_t> id_pool;
|
||||
std::mutex session_guard;
|
||||
};
|
||||
|
||||
Container::Container(Tegra::Host1x::Host1x& host1x_) {
|
||||
impl = std::make_unique<ContainerImpl>(host1x_);
|
||||
impl = std::make_unique<ContainerImpl>(*this, host1x_);
|
||||
}
|
||||
|
||||
Container::~Container() = default;
|
||||
|
||||
size_t Container::OpenSession(Kernel::KProcess* process) {
|
||||
std::scoped_lock lk(impl->session_guard);
|
||||
for (auto& session : impl->sessions) {
|
||||
if (!session.is_active) {
|
||||
continue;
|
||||
}
|
||||
if (session.process == process) {
|
||||
return session.id;
|
||||
}
|
||||
}
|
||||
size_t new_id{};
|
||||
auto* memory_interface = &process->GetMemory();
|
||||
auto& smmu = impl->host1x.MemoryManager();
|
||||
auto smmu_id = smmu.RegisterProcess(memory_interface);
|
||||
if (!impl->id_pool.empty()) {
|
||||
new_id = impl->id_pool.front();
|
||||
impl->id_pool.pop_front();
|
||||
impl->sessions[new_id] = Session{new_id, process, smmu_id};
|
||||
} else {
|
||||
new_id = impl->new_ids++;
|
||||
impl->sessions.emplace_back(new_id, process, smmu_id);
|
||||
}
|
||||
auto& session = impl->sessions[new_id];
|
||||
session.is_active = true;
|
||||
// Optimization
|
||||
if (process->IsApplication()) {
|
||||
auto& page_table = process->GetPageTable().GetBasePageTable();
|
||||
auto heap_start = page_table.GetHeapRegionStart();
|
||||
|
||||
Kernel::KProcessAddress cur_addr = heap_start;
|
||||
size_t region_size = 0;
|
||||
VAddr region_start = 0;
|
||||
while (true) {
|
||||
Kernel::KMemoryInfo mem_info{};
|
||||
Kernel::Svc::PageInfo page_info{};
|
||||
R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info),
|
||||
cur_addr));
|
||||
auto svc_mem_info = mem_info.GetSvcMemoryInfo();
|
||||
|
||||
// check if this memory block is heap
|
||||
if (svc_mem_info.state == Kernel::Svc::MemoryState::Normal) {
|
||||
if (svc_mem_info.size > region_size) {
|
||||
region_size = svc_mem_info.size;
|
||||
region_start = svc_mem_info.base_address;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we're done.
|
||||
const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size;
|
||||
if (next_address <= GetInteger(cur_addr)) {
|
||||
break;
|
||||
}
|
||||
|
||||
cur_addr = next_address;
|
||||
}
|
||||
session.has_preallocated_area = false;
|
||||
auto start_region = (region_size >> 15) >= 1024 ? smmu.Allocate(region_size) : 0;
|
||||
if (start_region != 0) {
|
||||
session.mapper = std::make_unique<HeapMapper>(region_start, start_region, region_size,
|
||||
smmu_id, impl->host1x);
|
||||
smmu.TrackContinuity(start_region, region_start, region_size, smmu_id);
|
||||
session.has_preallocated_area = true;
|
||||
LOG_CRITICAL(Debug, "Preallocation created!");
|
||||
}
|
||||
}
|
||||
return new_id;
|
||||
}
|
||||
|
||||
void Container::CloseSession(size_t id) {
|
||||
std::scoped_lock lk(impl->session_guard);
|
||||
auto& session = impl->sessions[id];
|
||||
auto& smmu = impl->host1x.MemoryManager();
|
||||
if (session.has_preallocated_area) {
|
||||
const DAddr region_start = session.mapper->GetRegionStart();
|
||||
const size_t region_size = session.mapper->GetRegionSize();
|
||||
session.mapper.reset();
|
||||
smmu.Free(region_start, region_size);
|
||||
session.has_preallocated_area = false;
|
||||
}
|
||||
session.is_active = false;
|
||||
smmu.UnregisterProcess(impl->sessions[id].smmu_id);
|
||||
impl->id_pool.emplace_front(id);
|
||||
}
|
||||
|
||||
Session* Container::GetSession(size_t id) {
|
||||
std::atomic_thread_fence(std::memory_order_acquire);
|
||||
return &impl->sessions[id];
|
||||
}
|
||||
|
||||
NvMap& Container::GetNvMapFile() {
|
||||
return impl->file;
|
||||
}
|
||||
|
@ -10,22 +10,49 @@
|
||||
|
||||
#include "core/hle/service/nvdrv/nvdata.h"
|
||||
|
||||
namespace Kernel {
|
||||
class KProcess;
|
||||
}
|
||||
|
||||
namespace Tegra::Host1x {
|
||||
class Host1x;
|
||||
} // namespace Tegra::Host1x
|
||||
|
||||
namespace Service::Nvidia::NvCore {
|
||||
|
||||
class HeapMapper;
|
||||
class NvMap;
|
||||
class SyncpointManager;
|
||||
|
||||
struct ContainerImpl;
|
||||
|
||||
struct Session {
|
||||
Session(size_t id_, Kernel::KProcess* process_, size_t smmu_id_);
|
||||
~Session();
|
||||
|
||||
Session(const Session&) = delete;
|
||||
Session& operator=(const Session&) = delete;
|
||||
Session(Session&&) = default;
|
||||
Session& operator=(Session&&) = default;
|
||||
|
||||
size_t id;
|
||||
Kernel::KProcess* process;
|
||||
size_t smmu_id;
|
||||
bool has_preallocated_area{};
|
||||
std::unique_ptr<HeapMapper> mapper{};
|
||||
bool is_active{};
|
||||
};
|
||||
|
||||
class Container {
|
||||
public:
|
||||
explicit Container(Tegra::Host1x::Host1x& host1x);
|
||||
~Container();
|
||||
|
||||
size_t OpenSession(Kernel::KProcess* process);
|
||||
void CloseSession(size_t id);
|
||||
|
||||
Session* GetSession(size_t id);
|
||||
|
||||
NvMap& GetNvMapFile();
|
||||
|
||||
const NvMap& GetNvMapFile() const;
|
||||
|
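Illustrative only: the expected lifecycle of an nvdrv session against the Container interface shown above. The `process` pointer would come from the service request context in the real code path; the function and namespace names are placeholders.

#include "core/hle/service/nvdrv/core/container.h"

namespace Example {

void SessionRoundTrip(Service::Nvidia::NvCore::Container& container,
                      Kernel::KProcess* process) {
    // OpenSession reuses an active session for the same process; otherwise it
    // registers the process with the Host1x SMMU and, for applications, may
    // preallocate a heap mapping.
    const size_t session_id = container.OpenSession(process);

    // nvmap handles allocated by this process record this session id, so
    // pinning can route through the session's heap mapper when available.
    auto* session = container.GetSession(session_id);
    (void)session;

    // CloseSession frees any preallocated heap area and unregisters the
    // process from the SMMU.
    container.CloseSession(session_id);
}

} // namespace Example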
src/core/hle/service/nvdrv/core/heap_mapper.cpp (new file, 175 lines)
@ -0,0 +1,175 @@
// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#include <mutex>

#include <boost/container/small_vector.hpp>
#define BOOST_NO_MT
#include <boost/pool/detail/mutex.hpp>
#undef BOOST_NO_MT
#include <boost/icl/interval.hpp>
#include <boost/icl/interval_base_set.hpp>
#include <boost/icl/interval_set.hpp>
#include <boost/icl/split_interval_map.hpp>
#include <boost/pool/pool.hpp>
#include <boost/pool/pool_alloc.hpp>
#include <boost/pool/poolfwd.hpp>

#include "core/hle/service/nvdrv/core/heap_mapper.h"
#include "video_core/host1x/host1x.h"

namespace boost {
template <typename T>
class fast_pool_allocator<T, default_user_allocator_new_delete, details::pool::null_mutex, 4096, 0>;
}

namespace Service::Nvidia::NvCore {

using IntervalCompare = std::less<DAddr>;
using IntervalInstance = boost::icl::interval_type_default<DAddr, std::less>;
using IntervalAllocator = boost::fast_pool_allocator<DAddr>;
using IntervalSet = boost::icl::interval_set<DAddr>;
using IntervalType = typename IntervalSet::interval_type;

template <typename Type>
struct counter_add_functor : public boost::icl::identity_based_inplace_combine<Type> {
    // types
    typedef counter_add_functor<Type> type;
    typedef boost::icl::identity_based_inplace_combine<Type> base_type;

    // public member functions
    void operator()(Type& current, const Type& added) const {
        current += added;
        if (current < base_type::identity_element()) {
            current = base_type::identity_element();
        }
    }

    // public static functions
    static void version(Type&){};
};

using OverlapCombine = counter_add_functor<int>;
using OverlapSection = boost::icl::inter_section<int>;
using OverlapCounter = boost::icl::split_interval_map<DAddr, int>;

struct HeapMapper::HeapMapperInternal {
    HeapMapperInternal(Tegra::Host1x::Host1x& host1x) : device_memory{host1x.MemoryManager()} {}
    ~HeapMapperInternal() = default;

    template <typename Func>
    void ForEachInOverlapCounter(OverlapCounter& current_range, VAddr cpu_addr, u64 size,
                                 Func&& func) {
        const DAddr start_address = cpu_addr;
        const DAddr end_address = start_address + size;
        const IntervalType search_interval{start_address, end_address};
        auto it = current_range.lower_bound(search_interval);
        if (it == current_range.end()) {
            return;
        }
        auto end_it = current_range.upper_bound(search_interval);
        for (; it != end_it; it++) {
            auto& inter = it->first;
            DAddr inter_addr_end = inter.upper();
            DAddr inter_addr = inter.lower();
            if (inter_addr_end > end_address) {
                inter_addr_end = end_address;
            }
            if (inter_addr < start_address) {
                inter_addr = start_address;
            }
            func(inter_addr, inter_addr_end, it->second);
        }
    }

    void RemoveEachInOverlapCounter(OverlapCounter& current_range,
                                    const IntervalType search_interval, int subtract_value) {
        bool any_removals = false;
        current_range.add(std::make_pair(search_interval, subtract_value));
        do {
            any_removals = false;
            auto it = current_range.lower_bound(search_interval);
            if (it == current_range.end()) {
                return;
            }
            auto end_it = current_range.upper_bound(search_interval);
            for (; it != end_it; it++) {
                if (it->second <= 0) {
                    any_removals = true;
                    current_range.erase(it);
                    break;
                }
            }
        } while (any_removals);
    }

    IntervalSet base_set;
    OverlapCounter mapping_overlaps;
    Tegra::MaxwellDeviceMemoryManager& device_memory;
    std::mutex guard;
};

HeapMapper::HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, size_t smmu_id,
                       Tegra::Host1x::Host1x& host1x)
    : m_vaddress{start_vaddress}, m_daddress{start_daddress}, m_size{size}, m_smmu_id{smmu_id} {
    m_internal = std::make_unique<HeapMapperInternal>(host1x);
}

HeapMapper::~HeapMapper() {
    m_internal->device_memory.Unmap(m_daddress, m_size);
}

DAddr HeapMapper::Map(VAddr start, size_t size) {
    std::scoped_lock lk(m_internal->guard);
    m_internal->base_set.clear();
    const IntervalType interval{start, start + size};
    m_internal->base_set.insert(interval);
    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
                                        [this](VAddr start_addr, VAddr end_addr, int) {
                                            const IntervalType other{start_addr, end_addr};
                                            m_internal->base_set.subtract(other);
                                        });
    if (!m_internal->base_set.empty()) {
        auto it = m_internal->base_set.begin();
        auto end_it = m_internal->base_set.end();
        for (; it != end_it; it++) {
            const VAddr inter_addr_end = it->upper();
            const VAddr inter_addr = it->lower();
            const size_t offset = inter_addr - m_vaddress;
            const size_t sub_size = inter_addr_end - inter_addr;
            m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size,
                                          m_smmu_id);
        }
    }
    m_internal->mapping_overlaps += std::make_pair(interval, 1);
    m_internal->base_set.clear();
    return m_daddress + (start - m_vaddress);
}

void HeapMapper::Unmap(VAddr start, size_t size) {
    std::scoped_lock lk(m_internal->guard);
    m_internal->base_set.clear();
    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
                                        [this](VAddr start_addr, VAddr end_addr, int value) {
                                            if (value <= 1) {
                                                const IntervalType other{start_addr, end_addr};
                                                m_internal->base_set.insert(other);
                                            }
                                        });
    if (!m_internal->base_set.empty()) {
        auto it = m_internal->base_set.begin();
        auto end_it = m_internal->base_set.end();
        for (; it != end_it; it++) {
            const VAddr inter_addr_end = it->upper();
            const VAddr inter_addr = it->lower();
            const size_t offset = inter_addr - m_vaddress;
            const size_t sub_size = inter_addr_end - inter_addr;
            m_internal->device_memory.Unmap(m_daddress + offset, sub_size);
        }
    }
    const IntervalType to_remove{start, start + size};
    m_internal->RemoveEachInOverlapCounter(m_internal->mapping_overlaps, to_remove, -1);
    m_internal->base_set.clear();
}

} // namespace Service::Nvidia::NvCore
src/core/hle/service/nvdrv/core/heap_mapper.h (new file, 48 lines)
@ -0,0 +1,48 @@
// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#pragma once

#include <memory>

#include "common/common_types.h"

namespace Tegra::Host1x {
class Host1x;
} // namespace Tegra::Host1x

namespace Service::Nvidia::NvCore {

class HeapMapper {
public:
    HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, size_t smmu_id,
               Tegra::Host1x::Host1x& host1x);
    ~HeapMapper();

    bool IsInBounds(VAddr start, size_t size) const {
        VAddr end = start + size;
        return start >= m_vaddress && end <= (m_vaddress + m_size);
    }

    DAddr Map(VAddr start, size_t size);

    void Unmap(VAddr start, size_t size);

    DAddr GetRegionStart() const {
        return m_daddress;
    }

    size_t GetRegionSize() const {
        return m_size;
    }

private:
    struct HeapMapperInternal;
    VAddr m_vaddress;
    DAddr m_daddress;
    size_t m_size;
    size_t m_smmu_id;
    std::unique_ptr<HeapMapperInternal> m_internal;
};

} // namespace Service::Nvidia::NvCore
@ -2,14 +2,19 @@
|
||||
// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
#include <functional>
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/hle/service/nvdrv/core/container.h"
|
||||
#include "core/hle/service/nvdrv/core/heap_mapper.h"
|
||||
#include "core/hle/service/nvdrv/core/nvmap.h"
|
||||
#include "core/memory.h"
|
||||
#include "video_core/host1x/host1x.h"
|
||||
|
||||
using Core::Memory::YUZU_PAGESIZE;
|
||||
constexpr size_t BIG_PAGE_SIZE = YUZU_PAGESIZE * 16;
|
||||
|
||||
namespace Service::Nvidia::NvCore {
|
||||
NvMap::Handle::Handle(u64 size_, Id id_)
|
||||
@ -17,9 +22,8 @@ NvMap::Handle::Handle(u64 size_, Id id_)
|
||||
flags.raw = 0;
|
||||
}
|
||||
|
||||
NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) {
|
||||
NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t pSessionId) {
|
||||
std::scoped_lock lock(mutex);
|
||||
|
||||
// Handles cannot be allocated twice
|
||||
if (allocated) {
|
||||
return NvResult::AccessDenied;
|
||||
@ -28,6 +32,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress)
|
||||
flags = pFlags;
|
||||
kind = pKind;
|
||||
align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;
|
||||
session_id = pSessionId;
|
||||
|
||||
// This flag is only applicable for handles with an address passed
|
||||
if (pAddress) {
|
||||
@ -63,7 +68,7 @@ NvResult NvMap::Handle::Duplicate(bool internal_session) {
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {}
|
||||
NvMap::NvMap(Container& core_, Tegra::Host1x::Host1x& host1x_) : host1x{host1x_}, core{core_} {}
|
||||
|
||||
void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) {
|
||||
std::scoped_lock lock(handles_lock);
|
||||
@ -78,12 +83,30 @@ void NvMap::UnmapHandle(Handle& handle_description) {
|
||||
handle_description.unmap_queue_entry.reset();
|
||||
}
|
||||
|
||||
// Free and unmap the handle from the SMMU
|
||||
host1x.MemoryManager().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
// Free and unmap the handle from Host1x GMMU
if (handle_description.pin_virt_address) {
host1x.GMMU().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
handle_description.aligned_size);
host1x.Allocator().Free(handle_description.pin_virt_address,
static_cast<u32>(handle_description.aligned_size));
handle_description.pin_virt_address = 0;
}

// Free and unmap the handle from the SMMU
const size_t map_size = handle_description.aligned_size;
if (!handle_description.in_heap) {
auto& smmu = host1x.MemoryManager();
size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);
smmu.Unmap(handle_description.d_address, map_size);
smmu.Free(handle_description.d_address, static_cast<size_t>(aligned_up));
handle_description.d_address = 0;
return;
}
const VAddr vaddress = handle_description.address;
auto* session = core.GetSession(handle_description.session_id);
session->mapper->Unmap(vaddress, map_size);
handle_description.d_address = 0;
handle_description.in_heap = false;
}

bool NvMap::TryRemoveHandle(const Handle& handle_description) {
@@ -124,22 +147,33 @@ std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
}
}

VAddr NvMap::GetHandleAddress(Handle::Id handle) {
DAddr NvMap::GetHandleAddress(Handle::Id handle) {
std::scoped_lock lock(handles_lock);
try {
return handles.at(handle)->address;
return handles.at(handle)->d_address;
} catch (std::out_of_range&) {
return 0;
}
}

u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
auto handle_description{GetHandle(handle)};
if (!handle_description) [[unlikely]] {
return 0;
}

std::scoped_lock lock(handle_description->mutex);
const auto map_low_area = [&] {
if (handle_description->pin_virt_address == 0) {
auto& gmmu_allocator = host1x.Allocator();
auto& gmmu = host1x.GMMU();
u32 address =
gmmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size));
gmmu.Map(static_cast<GPUVAddr>(address), handle_description->d_address,
handle_description->aligned_size);
handle_description->pin_virt_address = address;
}
};
if (!handle_description->pins) {
// If we're in the unmap queue we can just remove ourselves and return since we're already
// mapped
@@ -151,37 +185,58 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
unmap_queue.erase(*handle_description->unmap_queue_entry);
handle_description->unmap_queue_entry.reset();

if (low_area_pin) {
map_low_area();
handle_description->pins++;
return handle_description->pin_virt_address;
return static_cast<DAddr>(handle_description->pin_virt_address);
}

handle_description->pins++;
return handle_description->d_address;
}
}

using namespace std::placeholders;
// If not then allocate some space and map it
u32 address{};
auto& smmu_allocator = host1x.Allocator();
auto& smmu_memory_manager = host1x.MemoryManager();
while ((address = smmu_allocator.Allocate(
static_cast<u32>(handle_description->aligned_size))) == 0) {
DAddr address{};
auto& smmu = host1x.MemoryManager();
auto* session = core.GetSession(handle_description->session_id);
const VAddr vaddress = handle_description->address;
const size_t map_size = handle_description->aligned_size;
if (session->has_preallocated_area && session->mapper->IsInBounds(vaddress, map_size)) {
handle_description->d_address = session->mapper->Map(vaddress, map_size);
handle_description->in_heap = true;
} else {
size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);
while ((address = smmu.Allocate(aligned_up)) == 0) {
// Free handles until the allocation succeeds
std::scoped_lock queueLock(unmap_queue_lock);
if (auto freeHandleDesc{unmap_queue.front()}) {
// Handles in the unmap queue are guaranteed not to be pinned so don't bother
// checking if they are before unmapping
std::scoped_lock freeLock(freeHandleDesc->mutex);
if (handle_description->pin_virt_address)
if (handle_description->d_address)
UnmapHandle(*freeHandleDesc);
} else {
LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
}
}

smmu_memory_manager.Map(static_cast<GPUVAddr>(address), handle_description->address,
handle_description->aligned_size);
handle_description->pin_virt_address = address;
handle_description->d_address = address;
smmu.Map(address, vaddress, map_size, session->smmu_id, true);
handle_description->in_heap = false;
}
}

if (low_area_pin) {
map_low_area();
}

handle_description->pins++;
return handle_description->pin_virt_address;
if (low_area_pin) {
return static_cast<DAddr>(handle_description->pin_virt_address);
}
return handle_description->d_address;
}

void NvMap::UnpinHandle(Handle::Id handle) {
@@ -232,7 +287,7 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna
LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!");
} else if (handle_description->dupes == 0) {
// Force unmap the handle
if (handle_description->pin_virt_address) {
if (handle_description->d_address) {
std::scoped_lock queueLock(unmap_queue_lock);
UnmapHandle(*handle_description);
}
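The new PinHandle path above first tries the per-session preallocated heap and otherwise allocates SMMU space, evicting unpinned handles from the unmap queue until the allocation succeeds. A minimal standalone sketch of that allocate-or-evict loop follows; ToyAllocator, ToyHandle and PinWithEviction are invented stand-ins, not the yuzu types.

```cpp
#include <cstddef>
#include <cstdint>
#include <deque>

using DAddr = std::uint64_t;

// Toy allocator: tracks a budget so that freeing an evicted handle makes room again.
struct ToyAllocator {
    std::size_t capacity = 1 << 20;
    std::size_t used = 0;
    DAddr next = 0x1000;

    DAddr Allocate(std::size_t size) {
        if (used + size > capacity) {
            return 0; // Out of device address space.
        }
        used += size;
        const DAddr address = next;
        next += size;
        return address;
    }
    void Free(std::size_t size) {
        used -= size;
    }
};

struct ToyHandle {
    DAddr d_address = 0;
    std::size_t aligned_size = 0;
    int pins = 0;
};

// Mirrors the shape of the new PinHandle path: try to allocate device address space,
// and when that fails, unmap a handle parked in the unmap queue (known to be unpinned)
// to make room, retrying until the allocation succeeds or nothing is left to evict.
DAddr PinWithEviction(ToyAllocator& allocator, ToyHandle& handle,
                      std::deque<ToyHandle*>& unmap_queue) {
    DAddr address = 0;
    while ((address = allocator.Allocate(handle.aligned_size)) == 0) {
        if (unmap_queue.empty()) {
            return 0; // Equivalent of "ran out of SMMU address space".
        }
        ToyHandle* victim = unmap_queue.front();
        unmap_queue.pop_front();
        allocator.Free(victim->aligned_size); // Stand-in for UnmapHandle(*victim).
        victim->d_address = 0;
    }
    handle.d_address = address;
    handle.pins++;
    return handle.d_address;
}
```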
|
@ -25,6 +25,8 @@ class Host1x;
|
||||
} // namespace Tegra

namespace Service::Nvidia::NvCore {

class Container;
/**
* @brief The nvmap core class holds the global state for nvmap and provides methods to manage
* handles
@@ -48,7 +50,7 @@ public:
using Id = u32;
Id id; //!< A globally unique identifier for this handle

s32 pins{};
s64 pins{};
u32 pin_virt_address{};
std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{};

@@ -61,15 +63,18 @@ public:
} flags{};
static_assert(sizeof(Flags) == sizeof(u32));

u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to,
VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to,
//!< this can also be in the nvdrv tmem
bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
//!< call

u8 kind{}; //!< Used for memory compression
bool allocated{}; //!< If the handle has been allocated with `Alloc`
bool in_heap{};
size_t session_id{};

u64 dma_map_addr{}; //! remove me after implementing pinning.
DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds
//!< to, this can also be in the nvdrv tmem

Handle(u64 size, Id id);

@@ -77,7 +82,8 @@ public:
* @brief Sets up the handle with the given memory config, can allocate memory from the tmem
* if a 0 address is passed
*/
[[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress);
[[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress,
size_t pSessionId);

/**
* @brief Increases the dupe counter of the handle for the given session
@@ -108,7 +114,7 @@ public:
bool can_unlock; //!< If the address region is ready to be unlocked
};

explicit NvMap(Tegra::Host1x::Host1x& host1x);
explicit NvMap(Container& core, Tegra::Host1x::Host1x& host1x);

/**
* @brief Creates an unallocated handle of the given size
@@ -117,7 +123,7 @@ public:

std::shared_ptr<Handle> GetHandle(Handle::Id handle);

VAddr GetHandleAddress(Handle::Id handle);
DAddr GetHandleAddress(Handle::Id handle);

/**
* @brief Maps a handle into the SMMU address space
@@ -125,7 +131,7 @@ public:
* number of calls to `UnpinHandle`
* @return The SMMU virtual address that the handle has been mapped to
*/
u32 PinHandle(Handle::Id handle);
DAddr PinHandle(Handle::Id handle, bool low_area_pin);

/**
* @brief When this has been called an equal number of times to `PinHandle` for the supplied
@@ -172,5 +178,7 @@ private:
* @return If the handle was removed from the map
*/
bool TryRemoveHandle(const Handle& handle_description);

Container& core;
};
} // namespace Service::Nvidia::NvCore
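The header now exposes PinHandle(handle, low_area_pin) returning a DAddr and keeps a signed pin count per handle. The sketch below models, with hypothetical PinState/PinTracker types, how such a pin count and unmap queue typically interact: re-pinning pulls a handle back out of the queue, and only the final unpin parks it there.

```cpp
#include <algorithm>
#include <cstdint>
#include <deque>
#include <mutex>

// Illustrative types, not the real NvMap.
struct PinState {
    std::uint64_t d_address = 0;
    int pins = 0;
};

class PinTracker {
public:
    std::uint64_t Pin(PinState& handle) {
        std::scoped_lock lock{mutex};
        if (handle.pins == 0) {
            // Still mapped but parked for eviction; just remove it from the queue.
            unmap_queue.erase(std::remove(unmap_queue.begin(), unmap_queue.end(), &handle),
                              unmap_queue.end());
        }
        ++handle.pins;
        return handle.d_address;
    }

    void Unpin(PinState& handle) {
        std::scoped_lock lock{mutex};
        if (--handle.pins == 0) {
            unmap_queue.push_back(&handle); // Eligible for eviction, but not yet unmapped.
        }
    }

private:
    std::mutex mutex;
    std::deque<PinState*> unmap_queue;
};
```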
|
@ -62,7 +62,7 @@ public:
|
||||
* Called once a device is opened
|
||||
* @param fd The device fd
|
||||
*/
|
||||
virtual void OnOpen(DeviceFD fd) = 0;
|
||||
virtual void OnOpen(size_t session_id, DeviceFD fd) = 0;
|
||||
|
||||
/**
|
||||
* Called once a device is closed
|
||||
|
@ -35,14 +35,14 @@ NvResult nvdisp_disp0::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
void nvdisp_disp0::OnOpen(DeviceFD fd) {}
|
||||
void nvdisp_disp0::OnOpen(size_t session_id, DeviceFD fd) {}
|
||||
void nvdisp_disp0::OnClose(DeviceFD fd) {}
|
||||
|
||||
void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width,
|
||||
u32 height, u32 stride, android::BufferTransformFlags transform,
|
||||
const Common::Rectangle<int>& crop_rect,
|
||||
std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences) {
|
||||
const VAddr addr = nvmap.GetHandleAddress(buffer_handle);
|
||||
const DAddr addr = nvmap.GetHandleAddress(buffer_handle);
|
||||
LOG_TRACE(Service,
|
||||
"Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}",
|
||||
addr, offset, width, height, stride, format);
|
||||
|
@ -32,7 +32,7 @@ public:
|
||||
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
|
||||
std::span<u8> inline_output) override;
|
||||
|
||||
void OnOpen(DeviceFD fd) override;
|
||||
void OnOpen(size_t session_id, DeviceFD fd) override;
|
||||
void OnClose(DeviceFD fd) override;
|
||||
|
||||
/// Performs a screen flip, drawing the buffer pointed to by the handle.
|
||||
|
@ -86,7 +86,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
void nvhost_as_gpu::OnOpen(DeviceFD fd) {}
|
||||
void nvhost_as_gpu::OnOpen(size_t session_id, DeviceFD fd) {}
|
||||
void nvhost_as_gpu::OnClose(DeviceFD fd) {}
|
||||
|
||||
NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) {
|
||||
@ -206,6 +206,8 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
|
||||
static_cast<u32>(aligned_size >> page_size_bits));
|
||||
}
|
||||
|
||||
nvmap.UnpinHandle(mapping->handle);
|
||||
|
||||
// Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
|
||||
// Only FreeSpace can unmap them fully
|
||||
if (mapping->sparse_alloc) {
|
||||
@ -293,12 +295,12 @@ NvResult nvhost_as_gpu::Remap(std::span<IoctlRemapEntry> entries) {
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
VAddr cpu_address{static_cast<VAddr>(
|
||||
handle->address +
|
||||
(static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
|
||||
DAddr base = nvmap.PinHandle(entry.handle, false);
|
||||
DAddr device_address{static_cast<DAddr>(
|
||||
base + (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
|
||||
|
||||
gmmu->Map(virtual_address, cpu_address, size, static_cast<Tegra::PTEKind>(entry.kind),
|
||||
use_big_pages);
|
||||
gmmu->Map(virtual_address, device_address, size,
|
||||
static_cast<Tegra::PTEKind>(entry.kind), use_big_pages);
|
||||
}
|
||||
}
|
||||
|
||||
@ -331,9 +333,9 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
|
||||
}
|
||||
|
||||
u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
|
||||
VAddr cpu_address{mapping->ptr + params.buffer_offset};
|
||||
VAddr device_address{mapping->ptr + params.buffer_offset};
|
||||
|
||||
gmmu->Map(gpu_address, cpu_address, params.mapping_size,
|
||||
gmmu->Map(gpu_address, device_address, params.mapping_size,
|
||||
static_cast<Tegra::PTEKind>(params.kind), mapping->big_page);
|
||||
|
||||
return NvResult::Success;
|
||||
@ -349,7 +351,8 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)};
|
||||
DAddr device_address{
|
||||
static_cast<DAddr>(nvmap.PinHandle(params.handle, false) + params.buffer_offset)};
|
||||
u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};
|
||||
|
||||
bool big_page{[&]() {
|
||||
@ -373,15 +376,14 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
|
||||
}
|
||||
|
||||
const bool use_big_pages = alloc->second.big_pages && big_page;
|
||||
gmmu->Map(params.offset, cpu_address, size, static_cast<Tegra::PTEKind>(params.kind),
|
||||
gmmu->Map(params.offset, device_address, size, static_cast<Tegra::PTEKind>(params.kind),
|
||||
use_big_pages);
|
||||
|
||||
auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
|
||||
use_big_pages, alloc->second.sparse)};
|
||||
auto mapping{std::make_shared<Mapping>(params.handle, device_address, params.offset, size,
|
||||
true, use_big_pages, alloc->second.sparse)};
|
||||
alloc->second.mappings.push_back(mapping);
|
||||
mapping_map[params.offset] = mapping;
|
||||
} else {
|
||||
|
||||
auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
|
||||
u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
|
||||
u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
|
||||
@ -394,11 +396,11 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
|
||||
return NvResult::InsufficientMemory;
|
||||
}
|
||||
|
||||
gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size),
|
||||
gmmu->Map(params.offset, device_address, Common::AlignUp(size, page_size),
|
||||
static_cast<Tegra::PTEKind>(params.kind), big_page);
|
||||
|
||||
auto mapping{
|
||||
std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
|
||||
auto mapping{std::make_shared<Mapping>(params.handle, device_address, params.offset, size,
|
||||
false, big_page, false)};
|
||||
mapping_map[params.offset] = mapping;
|
||||
}
|
||||
|
||||
@ -433,6 +435,8 @@ NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) {
|
||||
gmmu->Unmap(params.offset, mapping->size);
|
||||
}
|
||||
|
||||
nvmap.UnpinHandle(mapping->handle);
|
||||
|
||||
mapping_map.erase(params.offset);
|
||||
} catch (const std::out_of_range&) {
|
||||
LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset);
|
||||
|
@ -55,7 +55,7 @@ public:
|
||||
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
|
||||
std::span<u8> inline_output) override;
|
||||
|
||||
void OnOpen(DeviceFD fd) override;
|
||||
void OnOpen(size_t session_id, DeviceFD fd) override;
|
||||
void OnClose(DeviceFD fd) override;
|
||||
|
||||
Kernel::KEvent* QueryEvent(u32 event_id) override;
|
||||
@ -159,16 +159,18 @@ private:
|
||||
NvCore::NvMap& nvmap;
|
||||
|
||||
struct Mapping {
|
||||
VAddr ptr;
|
||||
NvCore::NvMap::Handle::Id handle;
|
||||
DAddr ptr;
|
||||
u64 offset;
|
||||
u64 size;
|
||||
bool fixed;
|
||||
bool big_page; // Only valid if fixed == false
|
||||
bool sparse_alloc;
|
||||
|
||||
Mapping(VAddr ptr_, u64 offset_, u64 size_, bool fixed_, bool big_page_, bool sparse_alloc_)
|
||||
: ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), big_page(big_page_),
|
||||
sparse_alloc(sparse_alloc_) {}
|
||||
Mapping(NvCore::NvMap::Handle::Id handle_, DAddr ptr_, u64 offset_, u64 size_, bool fixed_,
|
||||
bool big_page_, bool sparse_alloc_)
|
||||
: handle(handle_), ptr(ptr_), offset(offset_), size(size_), fixed(fixed_),
|
||||
big_page(big_page_), sparse_alloc(sparse_alloc_) {}
|
||||
};
|
||||
|
||||
struct Allocation {
|
||||
@ -212,9 +214,6 @@ private:
|
||||
bool initialised{};
|
||||
} vm;
|
||||
std::shared_ptr<Tegra::MemoryManager> gmmu;
|
||||
|
||||
// s32 channel{};
|
||||
// u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE};
|
||||
};
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
||||
|
@ -76,7 +76,7 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inp
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
void nvhost_ctrl::OnOpen(DeviceFD fd) {}
|
||||
void nvhost_ctrl::OnOpen(size_t session_id, DeviceFD fd) {}
|
||||
|
||||
void nvhost_ctrl::OnClose(DeviceFD fd) {}
|
||||
|
||||
|
@ -32,7 +32,7 @@ public:
|
||||
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
|
||||
std::span<u8> inline_output) override;
|
||||
|
||||
void OnOpen(DeviceFD fd) override;
|
||||
void OnOpen(size_t session_id, DeviceFD fd) override;
|
||||
void OnClose(DeviceFD fd) override;
|
||||
|
||||
Kernel::KEvent* QueryEvent(u32 event_id) override;
|
||||
|
@ -82,7 +82,7 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8>
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
void nvhost_ctrl_gpu::OnOpen(DeviceFD fd) {}
|
||||
void nvhost_ctrl_gpu::OnOpen(size_t session_id, DeviceFD fd) {}
|
||||
void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {}
|
||||
|
||||
NvResult nvhost_ctrl_gpu::GetCharacteristics1(IoctlCharacteristics& params) {
|
||||
|
@ -28,7 +28,7 @@ public:
|
||||
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
|
||||
std::span<u8> inline_output) override;
|
||||
|
||||
void OnOpen(DeviceFD fd) override;
|
||||
void OnOpen(size_t session_id, DeviceFD fd) override;
|
||||
void OnClose(DeviceFD fd) override;
|
||||
|
||||
Kernel::KEvent* QueryEvent(u32 event_id) override;
|
||||
|
@ -120,7 +120,7 @@ NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
void nvhost_gpu::OnOpen(DeviceFD fd) {}
|
||||
void nvhost_gpu::OnOpen(size_t session_id, DeviceFD fd) {}
|
||||
void nvhost_gpu::OnClose(DeviceFD fd) {}
|
||||
|
||||
NvResult nvhost_gpu::SetNVMAPfd(IoctlSetNvmapFD& params) {
|
||||
|
@ -47,7 +47,7 @@ public:
|
||||
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
|
||||
std::span<u8> inline_output) override;
|
||||
|
||||
void OnOpen(DeviceFD fd) override;
|
||||
void OnOpen(size_t session_id, DeviceFD fd) override;
|
||||
void OnClose(DeviceFD fd) override;
|
||||
|
||||
Kernel::KEvent* QueryEvent(u32 event_id) override;
|
||||
|
@ -35,7 +35,7 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
|
||||
case 0x7:
|
||||
return WrapFixed(this, &nvhost_nvdec::SetSubmitTimeout, input, output);
|
||||
case 0x9:
|
||||
return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output);
|
||||
return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output, fd);
|
||||
case 0xa:
|
||||
return WrapFixedVariable(this, &nvhost_nvdec::UnmapBuffer, input, output);
|
||||
default:
|
||||
@ -68,9 +68,10 @@ NvResult nvhost_nvdec::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
void nvhost_nvdec::OnOpen(DeviceFD fd) {
|
||||
void nvhost_nvdec::OnOpen(size_t session_id, DeviceFD fd) {
|
||||
LOG_INFO(Service_NVDRV, "NVDEC video stream started");
|
||||
system.SetNVDECActive(true);
|
||||
sessions[fd] = session_id;
|
||||
}
|
||||
|
||||
void nvhost_nvdec::OnClose(DeviceFD fd) {
|
||||
@ -81,6 +82,10 @@ void nvhost_nvdec::OnClose(DeviceFD fd) {
|
||||
system.GPU().ClearCdmaInstance(iter->second);
|
||||
}
|
||||
system.SetNVDECActive(false);
|
||||
auto it = sessions.find(fd);
|
||||
if (it != sessions.end()) {
|
||||
sessions.erase(it);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
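Devices now receive the session id in OnOpen and keep a DeviceFD to session map that OnClose tears down, as nvhost_nvdec above (and nvhost_vic and nvmap below) do. A small self-contained sketch of that bookkeeping with a hypothetical ToyDevice:

```cpp
#include <cstddef>
#include <cstdint>
#include <unordered_map>

using DeviceFD = std::int32_t;

class ToyDevice {
public:
    void OnOpen(std::size_t session_id, DeviceFD fd) {
        sessions[fd] = session_id; // Remember which session opened this fd.
    }

    void OnClose(DeviceFD fd) {
        if (auto it = sessions.find(fd); it != sessions.end()) {
            sessions.erase(it);
        }
    }

    std::size_t SessionFor(DeviceFD fd) const {
        const auto it = sessions.find(fd);
        return it != sessions.end() ? it->second : 0;
    }

private:
    std::unordered_map<DeviceFD, std::size_t> sessions;
};
```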
|
||||
|
@ -20,7 +20,7 @@ public:
|
||||
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
|
||||
std::span<u8> inline_output) override;
|
||||
|
||||
void OnOpen(DeviceFD fd) override;
|
||||
void OnOpen(size_t session_id, DeviceFD fd) override;
|
||||
void OnClose(DeviceFD fd) override;
|
||||
};
|
||||
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/kernel/k_process.h"
|
||||
#include "core/hle/service/nvdrv/core/container.h"
|
||||
#include "core/hle/service/nvdrv/core/nvmap.h"
|
||||
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
|
||||
@ -95,6 +96,8 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, De
|
||||
offset += SliceVectors(data, fence_thresholds, params.fence_count, offset);
|
||||
|
||||
auto& gpu = system.GPU();
|
||||
auto* session = core.GetSession(sessions[fd]);
|
||||
|
||||
if (gpu.UseNvdec()) {
|
||||
for (std::size_t i = 0; i < syncpt_increments.size(); i++) {
|
||||
const SyncptIncr& syncpt_incr = syncpt_increments[i];
|
||||
@ -106,7 +109,7 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, De
|
||||
const auto object = nvmap.GetHandle(cmd_buffer.memory_id);
|
||||
ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
|
||||
Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
|
||||
system.ApplicationMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
|
||||
session->process->GetMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
|
||||
cmdlist.size() * sizeof(u32));
|
||||
gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
|
||||
}
|
||||
@ -133,10 +136,12 @@ NvResult nvhost_nvdec_common::GetWaitbase(IoctlGetWaitbase& params) {
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries) {
|
||||
NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries,
|
||||
DeviceFD fd) {
|
||||
const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size()));
|
||||
for (size_t i = 0; i < num_entries; i++) {
|
||||
entries[i].map_address = nvmap.PinHandle(entries[i].map_handle);
|
||||
DAddr pin_address = nvmap.PinHandle(entries[i].map_handle, true);
|
||||
entries[i].map_address = static_cast<u32>(pin_address);
|
||||
}
|
||||
|
||||
return NvResult::Success;
|
||||
|
@ -4,7 +4,9 @@
|
||||
#pragma once
|
||||
|
||||
#include <deque>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "common/swap.h"
|
||||
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
|
||||
@ -111,7 +113,7 @@ protected:
|
||||
NvResult Submit(IoctlSubmit& params, std::span<u8> input, DeviceFD fd);
|
||||
NvResult GetSyncpoint(IoctlGetSyncpoint& params);
|
||||
NvResult GetWaitbase(IoctlGetWaitbase& params);
|
||||
NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
|
||||
NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries, DeviceFD fd);
|
||||
NvResult UnmapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
|
||||
NvResult SetSubmitTimeout(u32 timeout);
|
||||
|
||||
@ -125,6 +127,7 @@ protected:
|
||||
NvCore::NvMap& nvmap;
|
||||
NvCore::ChannelType channel_type;
|
||||
std::array<u32, MaxSyncPoints> device_syncpoints{};
|
||||
std::unordered_map<DeviceFD, size_t> sessions;
|
||||
};
|
||||
}; // namespace Devices
|
||||
} // namespace Service::Nvidia
|
||||
|
@ -44,7 +44,7 @@ NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
void nvhost_nvjpg::OnOpen(DeviceFD fd) {}
|
||||
void nvhost_nvjpg::OnOpen(size_t session_id, DeviceFD fd) {}
|
||||
void nvhost_nvjpg::OnClose(DeviceFD fd) {}
|
||||
|
||||
NvResult nvhost_nvjpg::SetNVMAPfd(IoctlSetNvmapFD& params) {
|
||||
|
@ -22,7 +22,7 @@ public:
|
||||
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
|
||||
std::span<u8> inline_output) override;
|
||||
|
||||
void OnOpen(DeviceFD fd) override;
|
||||
void OnOpen(size_t session_id, DeviceFD fd) override;
|
||||
void OnClose(DeviceFD fd) override;
|
||||
|
||||
private:
|
||||
|
@ -33,7 +33,7 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
|
||||
case 0x3:
|
||||
return WrapFixed(this, &nvhost_vic::GetWaitbase, input, output);
|
||||
case 0x9:
|
||||
return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output);
|
||||
return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output, fd);
|
||||
case 0xa:
|
||||
return WrapFixedVariable(this, &nvhost_vic::UnmapBuffer, input, output);
|
||||
default:
|
||||
@ -68,7 +68,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
void nvhost_vic::OnOpen(DeviceFD fd) {}
|
||||
void nvhost_vic::OnOpen(size_t session_id, DeviceFD fd) {
|
||||
sessions[fd] = session_id;
|
||||
}
|
||||
|
||||
void nvhost_vic::OnClose(DeviceFD fd) {
|
||||
auto& host1x_file = core.Host1xDeviceFile();
|
||||
@ -76,6 +78,10 @@ void nvhost_vic::OnClose(DeviceFD fd) {
|
||||
if (iter != host1x_file.fd_to_id.end()) {
|
||||
system.GPU().ClearCdmaInstance(iter->second);
|
||||
}
|
||||
auto it = sessions.find(fd);
|
||||
if (it != sessions.end()) {
|
||||
sessions.erase(it);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
||||
|
@ -19,7 +19,7 @@ public:
|
||||
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
|
||||
std::span<u8> inline_output) override;
|
||||
|
||||
void OnOpen(DeviceFD fd) override;
|
||||
void OnOpen(size_t session_id, DeviceFD fd) override;
|
||||
void OnClose(DeviceFD fd) override;
|
||||
};
|
||||
} // namespace Service::Nvidia::Devices
|
||||
|
@ -36,9 +36,9 @@ NvResult nvmap::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
|
||||
case 0x3:
|
||||
return WrapFixed(this, &nvmap::IocFromId, input, output);
|
||||
case 0x4:
|
||||
return WrapFixed(this, &nvmap::IocAlloc, input, output);
|
||||
return WrapFixed(this, &nvmap::IocAlloc, input, output, fd);
|
||||
case 0x5:
|
||||
return WrapFixed(this, &nvmap::IocFree, input, output);
|
||||
return WrapFixed(this, &nvmap::IocFree, input, output, fd);
|
||||
case 0x9:
|
||||
return WrapFixed(this, &nvmap::IocParam, input, output);
|
||||
case 0xe:
|
||||
@ -67,8 +67,15 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, st
|
||||
return NvResult::NotImplemented;
|
||||
}
|
||||
|
||||
void nvmap::OnOpen(DeviceFD fd) {}
|
||||
void nvmap::OnClose(DeviceFD fd) {}
|
||||
void nvmap::OnOpen(size_t session_id, DeviceFD fd) {
|
||||
sessions[fd] = session_id;
|
||||
}
|
||||
void nvmap::OnClose(DeviceFD fd) {
|
||||
auto it = sessions.find(fd);
|
||||
if (it != sessions.end()) {
|
||||
sessions.erase(it);
|
||||
}
|
||||
}
|
||||
|
||||
NvResult nvmap::IocCreate(IocCreateParams& params) {
|
||||
LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);
|
||||
@ -87,7 +94,7 @@ NvResult nvmap::IocCreate(IocCreateParams& params) {
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvmap::IocAlloc(IocAllocParams& params) {
|
||||
NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) {
|
||||
LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address);
|
||||
|
||||
if (!params.handle) {
|
||||
@ -116,15 +123,15 @@ NvResult nvmap::IocAlloc(IocAllocParams& params) {
|
||||
return NvResult::InsufficientMemory;
|
||||
}
|
||||
|
||||
const auto result =
|
||||
handle_description->Alloc(params.flags, params.align, params.kind, params.address);
|
||||
const auto result = handle_description->Alloc(params.flags, params.align, params.kind,
|
||||
params.address, sessions[fd]);
|
||||
if (result != NvResult::Success) {
|
||||
LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
|
||||
return result;
|
||||
}
|
||||
bool is_out_io{};
|
||||
ASSERT(system.ApplicationProcess()
|
||||
->GetPageTable()
|
||||
auto process = container.GetSession(sessions[fd])->process;
|
||||
ASSERT(process->GetPageTable()
|
||||
.LockForMapDeviceAddressSpace(&is_out_io, handle_description->address,
|
||||
handle_description->size,
|
||||
Kernel::KMemoryPermission::None, true, false)
|
||||
@ -224,7 +231,7 @@ NvResult nvmap::IocParam(IocParamParams& params) {
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
NvResult nvmap::IocFree(IocFreeParams& params) {
|
||||
NvResult nvmap::IocFree(IocFreeParams& params, DeviceFD fd) {
|
||||
LOG_DEBUG(Service_NVDRV, "called");
|
||||
|
||||
if (!params.handle) {
|
||||
@ -233,9 +240,9 @@ NvResult nvmap::IocFree(IocFreeParams& params) {
|
||||
}
|
||||
|
||||
if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
|
||||
auto process = container.GetSession(sessions[fd])->process;
|
||||
if (freeInfo->can_unlock) {
|
||||
ASSERT(system.ApplicationProcess()
|
||||
->GetPageTable()
|
||||
ASSERT(process->GetPageTable()
|
||||
.UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
|
||||
.IsSuccess());
|
||||
}
|
||||
|
@ -33,7 +33,7 @@ public:
|
||||
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
|
||||
std::span<u8> inline_output) override;
|
||||
|
||||
void OnOpen(DeviceFD fd) override;
|
||||
void OnOpen(size_t session_id, DeviceFD fd) override;
|
||||
void OnClose(DeviceFD fd) override;
|
||||
|
||||
enum class HandleParameterType : u32_le {
|
||||
@ -100,11 +100,11 @@ public:
|
||||
static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");
|
||||
|
||||
NvResult IocCreate(IocCreateParams& params);
|
||||
NvResult IocAlloc(IocAllocParams& params);
|
||||
NvResult IocAlloc(IocAllocParams& params, DeviceFD fd);
|
||||
NvResult IocGetId(IocGetIdParams& params);
|
||||
NvResult IocFromId(IocFromIdParams& params);
|
||||
NvResult IocParam(IocParamParams& params);
|
||||
NvResult IocFree(IocFreeParams& params);
|
||||
NvResult IocFree(IocFreeParams& params, DeviceFD fd);
|
||||
|
||||
private:
|
||||
/// Id to use for the next handle that is created.
|
||||
@ -115,6 +115,7 @@ private:
|
||||
|
||||
NvCore::Container& container;
|
||||
NvCore::NvMap& file;
|
||||
std::unordered_map<DeviceFD, size_t> sessions;
|
||||
};
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
||||
|
@ -45,13 +45,22 @@ void EventInterface::FreeEvent(Kernel::KEvent* event) {
|
||||
void LoopProcess(Nvnflinger::Nvnflinger& nvnflinger, Core::System& system) {
|
||||
auto server_manager = std::make_unique<ServerManager>(system);
|
||||
auto module = std::make_shared<Module>(system);
|
||||
server_manager->RegisterNamedService("nvdrv", std::make_shared<NVDRV>(system, module, "nvdrv"));
|
||||
server_manager->RegisterNamedService("nvdrv:a",
|
||||
std::make_shared<NVDRV>(system, module, "nvdrv:a"));
|
||||
server_manager->RegisterNamedService("nvdrv:s",
|
||||
std::make_shared<NVDRV>(system, module, "nvdrv:s"));
|
||||
server_manager->RegisterNamedService("nvdrv:t",
|
||||
std::make_shared<NVDRV>(system, module, "nvdrv:t"));
|
||||
const auto NvdrvInterfaceFactoryForApplication = [&, module] {
|
||||
return std::make_shared<NVDRV>(system, module, "nvdrv");
|
||||
};
|
||||
const auto NvdrvInterfaceFactoryForApplets = [&, module] {
|
||||
return std::make_shared<NVDRV>(system, module, "nvdrv:a");
|
||||
};
|
||||
const auto NvdrvInterfaceFactoryForSysmodules = [&, module] {
|
||||
return std::make_shared<NVDRV>(system, module, "nvdrv:a");
|
||||
};
|
||||
const auto NvdrvInterfaceFactory = [&, module] {
|
||||
return std::make_shared<NVDRV>(system, module, "nvdrv:t");
|
||||
};
|
||||
server_manager->RegisterNamedService("nvdrv", NvdrvInterfaceFactoryForApplication);
|
||||
server_manager->RegisterNamedService("nvdrv:a", NvdrvInterfaceFactoryForApplets);
|
||||
server_manager->RegisterNamedService("nvdrv:s", NvdrvInterfaceFactoryForSysmodules);
|
||||
server_manager->RegisterNamedService("nvdrv:t", NvdrvInterfaceFactory);
|
||||
server_manager->RegisterNamedService("nvmemp", std::make_shared<NVMEMP>(system));
|
||||
nvnflinger.SetNVDrvInstance(module);
|
||||
ServerManager::RunServer(std::move(server_manager));
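Registering factory lambdas instead of pre-built NVDRV instances means every connection constructs its own interface object, and with it its own nvdrv session. A stripped-down model of the pattern; Registry and Service here are hypothetical stand-ins for ServerManager and NVDRV.

```cpp
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>

struct Service {
    explicit Service(std::string name_) : name(std::move(name_)) {}
    std::string name;
};

class Registry {
public:
    using Factory = std::function<std::shared_ptr<Service>()>;

    void RegisterNamedService(const std::string& name, Factory factory) {
        factories[name] = std::move(factory);
    }

    // Called per incoming connection: every caller gets a fresh instance.
    std::shared_ptr<Service> Connect(const std::string& name) {
        const auto it = factories.find(name);
        return it != factories.end() ? it->second() : nullptr;
    }

private:
    std::unordered_map<std::string, Factory> factories;
};

// Usage mirroring the diff: one factory per service name.
// Registry registry;
// registry.RegisterNamedService("nvdrv", [] { return std::make_shared<Service>("nvdrv"); });
// auto per_connection_instance = registry.Connect("nvdrv");
```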
|
||||
@ -113,7 +122,7 @@ NvResult Module::VerifyFD(DeviceFD fd) const {
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
DeviceFD Module::Open(const std::string& device_name) {
|
||||
DeviceFD Module::Open(const std::string& device_name, size_t session_id) {
|
||||
auto it = builders.find(device_name);
|
||||
if (it == builders.end()) {
|
||||
LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name);
|
||||
@ -124,7 +133,7 @@ DeviceFD Module::Open(const std::string& device_name) {
|
||||
auto& builder = it->second;
|
||||
auto device = builder(fd)->second;
|
||||
|
||||
device->OnOpen(fd);
|
||||
device->OnOpen(session_id, fd);
|
||||
|
||||
return fd;
|
||||
}
|
||||
|
@ -77,7 +77,7 @@ public:
|
||||
NvResult VerifyFD(DeviceFD fd) const;
|
||||
|
||||
/// Opens a device node and returns a file descriptor to it.
|
||||
DeviceFD Open(const std::string& device_name);
|
||||
DeviceFD Open(const std::string& device_name, size_t session_id);
|
||||
|
||||
/// Sends an ioctl command to the specified file descriptor.
|
||||
NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output);
|
||||
@ -93,6 +93,10 @@ public:
|
||||
|
||||
NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event);
|
||||
|
||||
NvCore::Container& GetContainer() {
|
||||
return container;
|
||||
}
|
||||
|
||||
private:
|
||||
friend class EventInterface;
|
||||
friend class Service::Nvnflinger::Nvnflinger;
|
||||
|
@ -3,8 +3,10 @@
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
#include "common/logging/log.h"
|
||||
#include "common/scope_exit.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/kernel/k_event.h"
|
||||
#include "core/hle/kernel/k_process.h"
|
||||
#include "core/hle/kernel/k_readable_event.h"
|
||||
#include "core/hle/service/ipc_helpers.h"
|
||||
#include "core/hle/service/nvdrv/nvdata.h"
|
||||
@ -37,7 +39,7 @@ void NVDRV::Open(HLERequestContext& ctx) {
|
||||
return;
|
||||
}
|
||||
|
||||
DeviceFD fd = nvdrv->Open(device_name);
|
||||
DeviceFD fd = nvdrv->Open(device_name, session_id);
|
||||
|
||||
rb.Push<DeviceFD>(fd);
|
||||
rb.PushEnum(fd != INVALID_NVDRV_FD ? NvResult::Success : NvResult::FileOperationFailed);
|
||||
@ -150,12 +152,29 @@ void NVDRV::Close(HLERequestContext& ctx) {
|
||||
|
||||
void NVDRV::Initialize(HLERequestContext& ctx) {
|
||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
|
||||
|
||||
is_initialized = true;
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 3};
|
||||
SCOPE_EXIT({
|
||||
rb.Push(ResultSuccess);
|
||||
rb.PushEnum(NvResult::Success);
|
||||
});
|
||||
|
||||
if (is_initialized) {
|
||||
// No need to initialize again
|
||||
return;
|
||||
}
|
||||
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto process_handle{ctx.GetCopyHandle(0)};
|
||||
// The transfer memory is lent to nvdrv as a work buffer since nvdrv is
|
||||
// unable to allocate as much memory on its own. For HLE it's unnecessary to handle it
|
||||
[[maybe_unused]] const auto transfer_memory_handle{ctx.GetCopyHandle(1)};
|
||||
[[maybe_unused]] const auto transfer_memory_size = rp.Pop<u32>();
|
||||
|
||||
auto& container = nvdrv->GetContainer();
|
||||
auto process = ctx.GetObjectFromHandle<Kernel::KProcess>(process_handle);
|
||||
session_id = container.OpenSession(process.GetPointerUnsafe());
|
||||
|
||||
is_initialized = true;
|
||||
}
|
||||
|
||||
void NVDRV::QueryEvent(HLERequestContext& ctx) {
|
||||
@ -242,6 +261,9 @@ NVDRV::NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char*
|
||||
RegisterHandlers(functions);
|
||||
}
|
||||
|
||||
NVDRV::~NVDRV() = default;
|
||||
NVDRV::~NVDRV() {
|
||||
auto& container = nvdrv->GetContainer();
|
||||
container.CloseSession(session_id);
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia
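Initialize above resolves the client process from the copied handle, opens a container session for it, and the destructor closes that session again. A rough sketch of the lifecycle with toy ToyContainer/ToyInterface types standing in for NvCore::Container and NVDRV:

```cpp
#include <cstddef>
#include <unordered_map>

struct ToyProcess {};

// Hypothetical container that hands out session ids keyed by process.
class ToyContainer {
public:
    std::size_t OpenSession(ToyProcess* process) {
        const std::size_t id = next_id++;
        sessions[id] = process;
        return id;
    }
    void CloseSession(std::size_t id) {
        sessions.erase(id);
    }

private:
    std::size_t next_id = 1;
    std::unordered_map<std::size_t, ToyProcess*> sessions;
};

class ToyInterface {
public:
    explicit ToyInterface(ToyContainer& container_) : container(container_) {}

    void Initialize(ToyProcess* process) {
        if (is_initialized) {
            return; // Second call is a no-op, as in the diff.
        }
        session_id = container.OpenSession(process);
        is_initialized = true;
    }

    ~ToyInterface() {
        container.CloseSession(session_id);
    }

private:
    ToyContainer& container;
    std::size_t session_id = 0;
    bool is_initialized = false;
};
```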
|
||||
|
@ -35,6 +35,7 @@ private:
|
||||
|
||||
u64 pid{};
|
||||
bool is_initialized{};
|
||||
size_t session_id{};
|
||||
Common::ScratchBuffer<u8> output_buffer;
|
||||
Common::ScratchBuffer<u8> inline_output_buffer;
|
||||
};
|
||||
|
@ -87,19 +87,20 @@ Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap,
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) {
|
||||
Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Nvidia::DeviceFD nvmap_fd) {
|
||||
// Free the handle.
|
||||
Nvidia::Devices::nvmap::IocFreeParams free_params{
|
||||
.handle = handle,
|
||||
};
|
||||
R_UNLESS(nvmap.IocFree(free_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
|
||||
R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success,
|
||||
VI::ResultOperationFailed);
|
||||
|
||||
// We succeeded.
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::ProcessAddress buffer,
|
||||
u32 size) {
|
||||
u32 size, Nvidia::DeviceFD nvmap_fd) {
|
||||
// Assign the allocated memory to the handle.
|
||||
Nvidia::Devices::nvmap::IocAllocParams alloc_params{
|
||||
.handle = handle,
|
||||
@ -109,16 +110,16 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce
|
||||
.kind = 0,
|
||||
.address = GetInteger(buffer),
|
||||
};
|
||||
R_UNLESS(nvmap.IocAlloc(alloc_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
|
||||
R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success,
|
||||
VI::ResultOperationFailed);
|
||||
|
||||
// We succeeded.
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv,
|
||||
Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv, Nvidia::DeviceFD nvmap_fd,
|
||||
Common::ProcessAddress buffer, u32 size) {
|
||||
// Get the nvmap device.
|
||||
auto nvmap_fd = nvdrv.Open("/dev/nvmap");
|
||||
auto nvmap = nvdrv.GetDevice<Nvidia::Devices::nvmap>(nvmap_fd);
|
||||
ASSERT(nvmap != nullptr);
|
||||
|
||||
@ -127,11 +128,11 @@ Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv,
|
||||
|
||||
// Ensure we maintain a clean state on failure.
|
||||
ON_RESULT_FAILURE {
|
||||
ASSERT(R_SUCCEEDED(FreeNvMapHandle(*nvmap, *out_handle)));
|
||||
ASSERT(R_SUCCEEDED(FreeNvMapHandle(*nvmap, *out_handle, nvmap_fd)));
|
||||
};
|
||||
|
||||
// Assign the allocated memory to the handle.
|
||||
R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size));
|
||||
R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size, nvmap_fd));
|
||||
}
|
||||
|
||||
constexpr auto SharedBufferBlockLinearFormat = android::PixelFormat::Rgba8888;
|
||||
@ -197,9 +198,13 @@ Result FbShareBufferManager::Initialize(u64* out_buffer_id, u64* out_layer_id, u
|
||||
std::addressof(m_buffer_page_group), m_system,
|
||||
SharedBufferSize));
|
||||
|
||||
auto& container = m_nvdrv->GetContainer();
|
||||
m_session_id = container.OpenSession(m_system.ApplicationProcess());
|
||||
m_nvmap_fd = m_nvdrv->Open("/dev/nvmap", m_session_id);
|
||||
|
||||
// Create an nvmap handle for the buffer and assign the memory to it.
|
||||
R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, map_address,
|
||||
SharedBufferSize));
|
||||
R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd,
|
||||
map_address, SharedBufferSize));
|
||||
|
||||
// Record the display id.
|
||||
m_display_id = display_id;
|
||||
|
@ -4,6 +4,7 @@
|
||||
#pragma once
|
||||
|
||||
#include "common/math_util.h"
|
||||
#include "core/hle/service/nvdrv/nvdata.h"
|
||||
#include "core/hle/service/nvnflinger/nvnflinger.h"
|
||||
#include "core/hle/service/nvnflinger/ui/fence.h"
|
||||
|
||||
@ -53,7 +54,8 @@ private:
|
||||
u64 m_layer_id = 0;
|
||||
u32 m_buffer_nvmap_handle = 0;
|
||||
SharedMemoryPoolLayout m_pool_layout = {};
|
||||
|
||||
Nvidia::DeviceFD m_nvmap_fd = {};
|
||||
size_t m_session_id = {};
|
||||
std::unique_ptr<Kernel::KPageGroup> m_buffer_page_group;
|
||||
|
||||
std::mutex m_guard;
|
||||
|
@ -126,7 +126,7 @@ void Nvnflinger::ShutdownLayers() {
|
||||
|
||||
void Nvnflinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
|
||||
nvdrv = std::move(instance);
|
||||
disp_fd = nvdrv->Open("/dev/nvdisp_disp0");
|
||||
disp_fd = nvdrv->Open("/dev/nvdisp_disp0", 0);
|
||||
}
|
||||
|
||||
std::optional<u64> Nvnflinger::OpenDisplay(std::string_view name) {
|
||||
|
@ -22,11 +22,13 @@ GraphicBuffer::GraphicBuffer(Service::Nvidia::NvCore::NvMap& nvmap,
|
||||
: NvGraphicBuffer(GetBuffer(buffer)), m_nvmap(std::addressof(nvmap)) {
|
||||
if (this->BufferId() > 0) {
|
||||
m_nvmap->DuplicateHandle(this->BufferId(), true);
|
||||
m_nvmap->PinHandle(this->BufferId(), false);
|
||||
}
|
||||
}
|
||||
|
||||
GraphicBuffer::~GraphicBuffer() {
|
||||
if (m_nvmap != nullptr && this->BufferId() > 0) {
|
||||
m_nvmap->UnpinHandle(this->BufferId());
|
||||
m_nvmap->FreeHandle(this->BufferId(), true);
|
||||
}
|
||||
}
|
||||
|
@ -22,27 +22,26 @@ constexpr Result ResultProcessNotFound{ErrorModule::PM, 1};
|
||||
|
||||
constexpr u64 NO_PROCESS_FOUND_PID{0};
|
||||
|
||||
std::optional<Kernel::KProcess*> SearchProcessList(
|
||||
const std::vector<Kernel::KProcess*>& process_list,
|
||||
std::function<bool(Kernel::KProcess*)> predicate) {
|
||||
using ProcessList = std::list<Kernel::KScopedAutoObject<Kernel::KProcess>>;
|
||||
|
||||
template <typename F>
|
||||
Kernel::KScopedAutoObject<Kernel::KProcess> SearchProcessList(ProcessList& process_list,
|
||||
F&& predicate) {
|
||||
const auto iter = std::find_if(process_list.begin(), process_list.end(), predicate);
|
||||
|
||||
if (iter == process_list.end()) {
|
||||
return std::nullopt;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return *iter;
|
||||
return iter->GetPointerUnsafe();
|
||||
}
|
||||
|
||||
void GetApplicationPidGeneric(HLERequestContext& ctx,
|
||||
const std::vector<Kernel::KProcess*>& process_list) {
|
||||
const auto process = SearchProcessList(process_list, [](const auto& proc) {
|
||||
return proc->GetProcessId() == Kernel::KProcess::ProcessIdMin;
|
||||
});
|
||||
void GetApplicationPidGeneric(HLERequestContext& ctx, ProcessList& process_list) {
|
||||
auto process = SearchProcessList(process_list, [](auto& p) { return p->IsApplication(); });
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 4};
|
||||
rb.Push(ResultSuccess);
|
||||
rb.Push(process.has_value() ? (*process)->GetProcessId() : NO_PROCESS_FOUND_PID);
|
||||
rb.Push(process.IsNull() ? NO_PROCESS_FOUND_PID : process->GetProcessId());
|
||||
}
|
||||
|
||||
} // Anonymous namespace
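SearchProcessList is now a function template over the predicate and returns a pointer-like object that callers null-check instead of std::optional. A generic equivalent over a plain list of pointers, purely illustrative:

```cpp
#include <algorithm>
#include <cstdint>
#include <list>

struct ToyProcess {
    std::uint64_t process_id;
    std::uint64_t program_id;
};

using ProcessList = std::list<ToyProcess*>;

// Generic predicate, no std::function allocation; returns nullptr when not found.
template <typename F>
ToyProcess* SearchProcessList(ProcessList& process_list, F&& predicate) {
    const auto iter = std::find_if(process_list.begin(), process_list.end(), predicate);
    return iter == process_list.end() ? nullptr : *iter;
}

// Usage:
// auto* proc = SearchProcessList(list, [pid](auto* p) { return p->process_id == pid; });
// if (proc == nullptr) { /* report ResultProcessNotFound */ }
```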
|
||||
@ -80,8 +79,7 @@ private:
|
||||
|
||||
class DebugMonitor final : public ServiceFramework<DebugMonitor> {
|
||||
public:
|
||||
explicit DebugMonitor(Core::System& system_)
|
||||
: ServiceFramework{system_, "pm:dmnt"}, kernel{system_.Kernel()} {
|
||||
explicit DebugMonitor(Core::System& system_) : ServiceFramework{system_, "pm:dmnt"} {
|
||||
// clang-format off
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, nullptr, "GetJitDebugProcessIdList"},
|
||||
@ -106,12 +104,11 @@ private:
|
||||
|
||||
LOG_DEBUG(Service_PM, "called, program_id={:016X}", program_id);
|
||||
|
||||
const auto process =
|
||||
SearchProcessList(kernel.GetProcessList(), [program_id](const auto& proc) {
|
||||
return proc->GetProgramId() == program_id;
|
||||
});
|
||||
auto list = kernel.GetProcessList();
|
||||
auto process = SearchProcessList(
|
||||
list, [program_id](auto& p) { return p->GetProgramId() == program_id; });
|
||||
|
||||
if (!process.has_value()) {
|
||||
if (process.IsNull()) {
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ResultProcessNotFound);
|
||||
return;
|
||||
@ -119,12 +116,13 @@ private:
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 4};
|
||||
rb.Push(ResultSuccess);
|
||||
rb.Push((*process)->GetProcessId());
|
||||
rb.Push(process->GetProcessId());
|
||||
}
|
||||
|
||||
void GetApplicationProcessId(HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_PM, "called");
|
||||
GetApplicationPidGeneric(ctx, kernel.GetProcessList());
|
||||
auto list = kernel.GetProcessList();
|
||||
GetApplicationPidGeneric(ctx, list);
|
||||
}
|
||||
|
||||
void AtmosphereGetProcessInfo(HLERequestContext& ctx) {
|
||||
@ -135,11 +133,10 @@ private:
|
||||
|
||||
LOG_WARNING(Service_PM, "(Partial Implementation) called, pid={:016X}", pid);
|
||||
|
||||
const auto process = SearchProcessList(kernel.GetProcessList(), [pid](const auto& proc) {
|
||||
return proc->GetProcessId() == pid;
|
||||
});
|
||||
auto list = kernel.GetProcessList();
|
||||
auto process = SearchProcessList(list, [pid](auto& p) { return p->GetProcessId() == pid; });
|
||||
|
||||
if (!process.has_value()) {
|
||||
if (process.IsNull()) {
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ResultProcessNotFound);
|
||||
return;
|
||||
@ -159,7 +156,7 @@ private:
|
||||
|
||||
OverrideStatus override_status{};
|
||||
ProgramLocation program_location{
|
||||
.program_id = (*process)->GetProgramId(),
|
||||
.program_id = process->GetProgramId(),
|
||||
.storage_id = 0,
|
||||
};
|
||||
|
||||
@ -169,14 +166,11 @@ private:
|
||||
rb.PushRaw(program_location);
|
||||
rb.PushRaw(override_status);
|
||||
}
|
||||
|
||||
const Kernel::KernelCore& kernel;
|
||||
};
|
||||
|
||||
class Info final : public ServiceFramework<Info> {
|
||||
public:
|
||||
explicit Info(Core::System& system_, const std::vector<Kernel::KProcess*>& process_list_)
|
||||
: ServiceFramework{system_, "pm:info"}, process_list{process_list_} {
|
||||
explicit Info(Core::System& system_) : ServiceFramework{system_, "pm:info"} {
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, &Info::GetProgramId, "GetProgramId"},
|
||||
{65000, &Info::AtmosphereGetProcessId, "AtmosphereGetProcessId"},
|
||||
@ -193,11 +187,11 @@ private:
|
||||
|
||||
LOG_DEBUG(Service_PM, "called, process_id={:016X}", process_id);
|
||||
|
||||
const auto process = SearchProcessList(process_list, [process_id](const auto& proc) {
|
||||
return proc->GetProcessId() == process_id;
|
||||
});
|
||||
auto list = kernel.GetProcessList();
|
||||
auto process = SearchProcessList(
|
||||
list, [process_id](auto& p) { return p->GetProcessId() == process_id; });
|
||||
|
||||
if (!process.has_value()) {
|
||||
if (process.IsNull()) {
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ResultProcessNotFound);
|
||||
return;
|
||||
@ -205,7 +199,7 @@ private:
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 4};
|
||||
rb.Push(ResultSuccess);
|
||||
rb.Push((*process)->GetProgramId());
|
||||
rb.Push(process->GetProgramId());
|
||||
}
|
||||
|
||||
void AtmosphereGetProcessId(HLERequestContext& ctx) {
|
||||
@ -214,11 +208,11 @@ private:
|
||||
|
||||
LOG_DEBUG(Service_PM, "called, program_id={:016X}", program_id);
|
||||
|
||||
const auto process = SearchProcessList(process_list, [program_id](const auto& proc) {
|
||||
return proc->GetProgramId() == program_id;
|
||||
});
|
||||
auto list = system.Kernel().GetProcessList();
|
||||
auto process = SearchProcessList(
|
||||
list, [program_id](auto& p) { return p->GetProgramId() == program_id; });
|
||||
|
||||
if (!process.has_value()) {
|
||||
if (process.IsNull()) {
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ResultProcessNotFound);
|
||||
return;
|
||||
@ -226,16 +220,13 @@ private:
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 4};
|
||||
rb.Push(ResultSuccess);
|
||||
rb.Push((*process)->GetProcessId());
|
||||
rb.Push(process->GetProcessId());
|
||||
}
|
||||
|
||||
const std::vector<Kernel::KProcess*>& process_list;
|
||||
};
|
||||
|
||||
class Shell final : public ServiceFramework<Shell> {
|
||||
public:
|
||||
explicit Shell(Core::System& system_)
|
||||
: ServiceFramework{system_, "pm:shell"}, kernel{system_.Kernel()} {
|
||||
explicit Shell(Core::System& system_) : ServiceFramework{system_, "pm:shell"} {
|
||||
// clang-format off
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, nullptr, "LaunchProgram"},
|
||||
@ -257,10 +248,9 @@ public:
|
||||
private:
|
||||
void GetApplicationProcessIdForShell(HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_PM, "called");
|
||||
GetApplicationPidGeneric(ctx, kernel.GetProcessList());
|
||||
auto list = kernel.GetProcessList();
|
||||
GetApplicationPidGeneric(ctx, list);
|
||||
}
|
||||
|
||||
const Kernel::KernelCore& kernel;
|
||||
};
|
||||
|
||||
void LoopProcess(Core::System& system) {
|
||||
@ -268,8 +258,7 @@ void LoopProcess(Core::System& system) {
|
||||
|
||||
server_manager->RegisterNamedService("pm:bm", std::make_shared<BootMode>(system));
|
||||
server_manager->RegisterNamedService("pm:dmnt", std::make_shared<DebugMonitor>(system));
|
||||
server_manager->RegisterNamedService(
|
||||
"pm:info", std::make_shared<Info>(system, system.Kernel().GetProcessList()));
|
||||
server_manager->RegisterNamedService("pm:info", std::make_shared<Info>(system));
|
||||
server_manager->RegisterNamedService("pm:shell", std::make_shared<Shell>(system));
|
||||
ServerManager::RunServer(std::move(server_manager));
|
||||
}
|
||||
|
@ -256,8 +256,13 @@ Result ServerManager::WaitAndProcessImpl() {
|
||||
|
||||
// Wait for a signal.
|
||||
s32 out_index{-1};
|
||||
R_TRY(Kernel::KSynchronizationObject::Wait(m_system.Kernel(), &out_index, wait_objs.data(),
|
||||
num_objs, -1));
|
||||
R_TRY_CATCH(Kernel::KSynchronizationObject::Wait(m_system.Kernel(), &out_index,
|
||||
wait_objs.data(), num_objs, -1)) {
|
||||
R_CATCH(Kernel::ResultSessionClosed) {
|
||||
// On session closed, index is updated and we don't want to return an error.
|
||||
}
|
||||
}
|
||||
R_END_TRY_CATCH;
|
||||
ASSERT(out_index >= 0 && out_index < num_objs);
|
||||
|
||||
// Set the output index.
|
||||
|
@ -19,8 +19,54 @@
|
||||
#include "core/arm/nce/patcher.h"
|
||||
#endif
|
||||
|
||||
#ifndef HAS_NCE
|
||||
namespace Core::NCE {
|
||||
class Patcher {};
|
||||
} // namespace Core::NCE
|
||||
#endif
|
||||
|
||||
namespace Loader {
|
||||
|
||||
struct PatchCollection {
|
||||
explicit PatchCollection(bool is_application_) : is_application{is_application_} {
|
||||
module_patcher_indices.fill(-1);
|
||||
patchers.emplace_back();
|
||||
}
|
||||
|
||||
std::vector<Core::NCE::Patcher>* GetPatchers() {
|
||||
if (is_application && Settings::IsNceEnabled()) {
|
||||
return &patchers;
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
size_t GetTotalPatchSize() const {
|
||||
size_t total_size{};
|
||||
#ifdef HAS_NCE
|
||||
for (auto& patcher : patchers) {
|
||||
total_size += patcher.GetSectionSize();
|
||||
}
|
||||
#endif
|
||||
return total_size;
|
||||
}
|
||||
|
||||
void SaveIndex(size_t module) {
|
||||
module_patcher_indices[module] = static_cast<s32>(patchers.size() - 1);
|
||||
}
|
||||
|
||||
s32 GetIndex(size_t module) const {
|
||||
return module_patcher_indices[module];
|
||||
}
|
||||
|
||||
s32 GetLastIndex() const {
|
||||
return static_cast<s32>(patchers.size()) - 1;
|
||||
}
|
||||
|
||||
bool is_application;
|
||||
std::vector<Core::NCE::Patcher> patchers;
|
||||
std::array<s32, 13> module_patcher_indices{};
|
||||
};
|
||||
|
||||
AppLoader_DeconstructedRomDirectory::AppLoader_DeconstructedRomDirectory(FileSys::VirtualFile file_,
|
||||
bool override_update_)
|
||||
: AppLoader(std::move(file_)), override_update(override_update_), is_hbl(false) {
|
||||
@ -142,18 +188,7 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
|
||||
std::size_t code_size{};
|
||||
|
||||
// Define an nce patch context for each potential module.
|
||||
#ifdef HAS_NCE
|
||||
std::array<Core::NCE::Patcher, 13> module_patchers;
|
||||
#endif
|
||||
|
||||
const auto GetPatcher = [&](size_t i) -> Core::NCE::Patcher* {
|
||||
#ifdef HAS_NCE
|
||||
if (is_application && Settings::IsNceEnabled()) {
|
||||
return &module_patchers[i];
|
||||
}
|
||||
#endif
|
||||
return nullptr;
|
||||
};
|
||||
PatchCollection patch_ctx{is_application};
|
||||
|
||||
// Use the NSO module loader to figure out the code layout
|
||||
for (size_t i = 0; i < static_modules.size(); i++) {
|
||||
@ -164,13 +199,14 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
|
||||
}
|
||||
|
||||
const bool should_pass_arguments = std::strcmp(module, "rtld") == 0;
|
||||
const auto tentative_next_load_addr =
|
||||
AppLoader_NSO::LoadModule(process, system, *module_file, code_size,
|
||||
should_pass_arguments, false, {}, GetPatcher(i));
|
||||
const auto tentative_next_load_addr = AppLoader_NSO::LoadModule(
|
||||
process, system, *module_file, code_size, should_pass_arguments, false, {},
|
||||
patch_ctx.GetPatchers(), patch_ctx.GetLastIndex());
|
||||
if (!tentative_next_load_addr) {
|
||||
return {ResultStatus::ErrorLoadingNSO, {}};
|
||||
}
|
||||
|
||||
patch_ctx.SaveIndex(i);
|
||||
code_size = *tentative_next_load_addr;
|
||||
}
|
||||
|
||||
@ -184,6 +220,9 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
|
||||
return 0;
|
||||
}();
|
||||
|
||||
// Add patch size to the total module size
|
||||
code_size += patch_ctx.GetTotalPatchSize();
|
||||
|
||||
// Setup the process code layout
|
||||
if (process.LoadFromMetadata(metadata, code_size, fastmem_base, is_hbl).IsError()) {
|
||||
return {ResultStatus::ErrorUnableToParseKernelMetadata, {}};
|
||||
@ -204,9 +243,9 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
|
||||
|
||||
const VAddr load_addr{next_load_addr};
|
||||
const bool should_pass_arguments = std::strcmp(module, "rtld") == 0;
|
||||
const auto tentative_next_load_addr =
|
||||
AppLoader_NSO::LoadModule(process, system, *module_file, load_addr,
|
||||
should_pass_arguments, true, pm, GetPatcher(i));
|
||||
const auto tentative_next_load_addr = AppLoader_NSO::LoadModule(
|
||||
process, system, *module_file, load_addr, should_pass_arguments, true, pm,
|
||||
patch_ctx.GetPatchers(), patch_ctx.GetIndex(i));
|
||||
if (!tentative_next_load_addr) {
|
||||
return {ResultStatus::ErrorLoadingNSO, {}};
|
||||
}
|
||||
@ -216,20 +255,6 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
|
||||
LOG_DEBUG(Loader, "loaded module {} @ {:#X}", module, load_addr);
|
||||
}
|
||||
|
||||
// Find the RomFS by searching for a ".romfs" file in this directory
|
||||
const auto& files = dir->GetFiles();
|
||||
const auto romfs_iter =
|
||||
std::find_if(files.begin(), files.end(), [](const FileSys::VirtualFile& f) {
|
||||
return f->GetName().find(".romfs") != std::string::npos;
|
||||
});
|
||||
|
||||
// Register the RomFS if a ".romfs" file was found
|
||||
if (romfs_iter != files.end() && *romfs_iter != nullptr) {
|
||||
romfs = *romfs_iter;
|
||||
system.GetFileSystemController().RegisterRomFS(std::make_unique<FileSys::RomFSFactory>(
|
||||
*this, system.GetContentProvider(), system.GetFileSystemController()));
|
||||
}
|
||||
|
||||
is_loaded = true;
|
||||
return {ResultStatus::Success,
|
||||
LoadParameters{metadata.GetMainThreadPriority(), metadata.GetMainThreadStackSize()}};
|
||||
|
@ -74,8 +74,10 @@ AppLoader_NCA::LoadResult AppLoader_NCA::Load(Kernel::KProcess& process, Core::S
|
||||
return load_result;
|
||||
}
|
||||
|
||||
system.GetFileSystemController().RegisterRomFS(std::make_unique<FileSys::RomFSFactory>(
|
||||
*this, system.GetContentProvider(), system.GetFileSystemController()));
|
||||
system.GetFileSystemController().RegisterProcess(
|
||||
process.GetProcessId(), nca->GetTitleId(),
|
||||
std::make_shared<FileSys::RomFSFactory>(*this, system.GetContentProvider(),
|
||||
system.GetFileSystemController()));
|
||||
|
||||
is_loaded = true;
|
||||
return load_result;
|
||||
|
@ -275,10 +275,12 @@ AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::KProcess& process, Core::S
|
||||
return {ResultStatus::ErrorLoadingNRO, {}};
|
||||
}
|
||||
|
||||
if (romfs != nullptr) {
|
||||
system.GetFileSystemController().RegisterRomFS(std::make_unique<FileSys::RomFSFactory>(
|
||||
*this, system.GetContentProvider(), system.GetFileSystemController()));
|
||||
}
|
||||
u64 program_id{};
|
||||
ReadProgramId(program_id);
|
||||
system.GetFileSystemController().RegisterProcess(
|
||||
process.GetProcessId(), program_id,
|
||||
std::make_unique<FileSys::RomFSFactory>(*this, system.GetContentProvider(),
|
||||
system.GetFileSystemController()));
|
||||
|
||||
is_loaded = true;
|
||||
return {ResultStatus::Success, LoadParameters{Kernel::KThread::DefaultThreadPriority,
|
||||
|
@ -77,7 +77,8 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::KProcess& process, Core::
|
||||
const FileSys::VfsFile& nso_file, VAddr load_base,
|
||||
bool should_pass_arguments, bool load_into_process,
|
||||
std::optional<FileSys::PatchManager> pm,
|
||||
Core::NCE::Patcher* patch) {
|
||||
std::vector<Core::NCE::Patcher>* patches,
|
||||
s32 patch_index) {
|
||||
if (nso_file.GetSize() < sizeof(NSOHeader)) {
|
||||
return std::nullopt;
|
||||
}
|
||||
@ -94,9 +95,12 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::KProcess& process, Core::
|
||||
// Allocate some space at the beginning if we are patching in PreText mode.
|
||||
const size_t module_start = [&]() -> size_t {
|
||||
#ifdef HAS_NCE
|
||||
if (patch && patch->GetPatchMode() == Core::NCE::PatchMode::PreText) {
|
||||
if (patches && load_into_process) {
|
||||
auto* patch = &patches->operator[](patch_index);
|
||||
if (patch->GetPatchMode() == Core::NCE::PatchMode::PreText) {
|
||||
return patch->GetSectionSize();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}();
|
||||
@ -160,27 +164,24 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::KProcess& process, Core::
#ifdef HAS_NCE
    // If we are computing the process code layout and using nce backend, patch.
    const auto& code = codeset.CodeSegment();
    if (patch && patch->GetPatchMode() == Core::NCE::PatchMode::None) {
    auto* patch = patches ? &patches->operator[](patch_index) : nullptr;
    if (patch && !load_into_process) {
        // Patch SVCs and MRS calls in the guest code
        patch->PatchText(program_image, code);

        // Add patch section size to the module size.
        image_size += static_cast<u32>(patch->GetSectionSize());
        while (!patch->PatchText(program_image, code)) {
            patch = &patches->emplace_back();
        }
    } else if (patch) {
        // Relocate code patch and copy to the program_image.
        patch->RelocateAndCopy(load_base, code, program_image, &process.GetPostHandlers());

        if (patch->RelocateAndCopy(load_base, code, program_image, &process.GetPostHandlers())) {
        // Update patch section.
        auto& patch_segment = codeset.PatchSegment();
        patch_segment.addr =
            patch->GetPatchMode() == Core::NCE::PatchMode::PreText ? 0 : image_size;
        patch_segment.size = static_cast<u32>(patch->GetSectionSize());

        // Add patch section size to the module size. In PreText mode image_size
        // already contains the patch segment as part of module_start.
        if (patch->GetPatchMode() == Core::NCE::PatchMode::PostData) {
            image_size += patch_segment.size;
        }

        // Refresh image_size to take account the patch section if it was added by RelocateAndCopy
        image_size = static_cast<u32>(program_image.size());
    }
#endif

@ -93,7 +93,8 @@ public:
                                           const FileSys::VfsFile& nso_file, VAddr load_base,
                                           bool should_pass_arguments, bool load_into_process,
                                           std::optional<FileSys::PatchManager> pm = {},
                                           Core::NCE::Patcher* patch = nullptr);
                                           std::vector<Core::NCE::Patcher>* patches = nullptr,
                                           s32 patch_index = -1);

    LoadResult Load(Kernel::KProcess& process, Core::System& system) override;

@ -111,7 +111,8 @@ AppLoader_NSP::LoadResult AppLoader_NSP::Load(Kernel::KProcess& process, Core::S

    FileSys::VirtualFile update_raw;
    if (ReadUpdateRaw(update_raw) == ResultStatus::Success && update_raw != nullptr) {
        system.GetFileSystemController().SetPackedUpdate(std::move(update_raw));
        system.GetFileSystemController().SetPackedUpdate(process.GetProcessId(),
                                                         std::move(update_raw));
    }

    is_loaded = true;
@ -78,7 +78,8 @@ AppLoader_XCI::LoadResult AppLoader_XCI::Load(Kernel::KProcess& process, Core::S

    FileSys::VirtualFile update_raw;
    if (ReadUpdateRaw(update_raw) == ResultStatus::Success && update_raw != nullptr) {
        system.GetFileSystemController().SetPackedUpdate(std::move(update_raw));
        system.GetFileSystemController().SetPackedUpdate(process.GetProcessId(),
                                                         std::move(update_raw));
    }

    is_loaded = true;
@ -24,6 +24,8 @@
#include "core/hle/kernel/k_process.h"
#include "core/memory.h"
#include "video_core/gpu.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/host1x/host1x.h"
#include "video_core/rasterizer_download_area.h"

namespace Core::Memory {
@ -637,17 +639,6 @@ struct Memory::Impl {
        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", GetInteger(target),
                  base * YUZU_PAGESIZE, (base + size) * YUZU_PAGESIZE);

        // During boot, current_page_table might not be set yet, in which case we need not flush
        if (system.IsPoweredOn()) {
            auto& gpu = system.GPU();
            for (u64 i = 0; i < size; i++) {
                const auto page = base + i;
                if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
                    gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE);
                }
            }
        }

        const auto end = base + size;
        ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
                   base + page_table.pointers.size());
@ -811,21 +802,33 @@ struct Memory::Impl {
        return true;
    }

    void HandleRasterizerDownload(VAddr address, size_t size) {
    void HandleRasterizerDownload(VAddr v_address, size_t size) {
        const auto* p = GetPointerImpl(
            v_address, []() {}, []() {});
        if (!gpu_device_memory) [[unlikely]] {
            gpu_device_memory = &system.Host1x().MemoryManager();
        }
        const size_t core = system.GetCurrentHostThreadID();
        auto& current_area = rasterizer_read_areas[core];
        const VAddr end_address = address + size;
        gpu_device_memory->ApplyOpOnPointer(p, scratch_buffers[core], [&](DAddr address) {
            const DAddr end_address = address + size;
            if (current_area.start_address <= address && end_address <= current_area.end_address)
                [[likely]] {
                return;
            }
            current_area = system.GPU().OnCPURead(address, size);
        });
    }

    void HandleRasterizerWrite(VAddr address, size_t size) {
    void HandleRasterizerWrite(VAddr v_address, size_t size) {
        const auto* p = GetPointerImpl(
            v_address, []() {}, []() {});
        constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1;
        const size_t core = std::min(system.GetCurrentHostThreadID(),
                                     sys_core); // any other calls threads go to syscore.
        if (!gpu_device_memory) [[unlikely]] {
            gpu_device_memory = &system.Host1x().MemoryManager();
        }
        // Guard on sys_core;
        if (core == sys_core) [[unlikely]] {
            sys_core_guard.lock();
@ -835,8 +838,9 @@ struct Memory::Impl {
                sys_core_guard.unlock();
            }
        });
        gpu_device_memory->ApplyOpOnPointer(p, scratch_buffers[core], [&](DAddr address) {
            auto& current_area = rasterizer_write_areas[core];
            VAddr subaddress = address >> YUZU_PAGEBITS;
            PAddr subaddress = address >> YUZU_PAGEBITS;
            bool do_collection = current_area.last_address == subaddress;
            if (!do_collection) [[unlikely]] {
                do_collection = system.GPU().OnCPUWrite(address, size);
@ -846,25 +850,41 @@ struct Memory::Impl {
                current_area.last_address = subaddress;
            }
            gpu_dirty_managers[core].Collect(address, size);
        });
    }

    struct GPUDirtyState {
        VAddr last_address;
        PAddr last_address;
    };

    void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) {
        system.GPU().InvalidateRegion(GetInteger(dest_addr), size);
    void InvalidateGPUMemory(u8* p, size_t size) {
        constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1;
        const size_t core = std::min(system.GetCurrentHostThreadID(),
                                     sys_core); // any other calls threads go to syscore.
        if (!gpu_device_memory) [[unlikely]] {
            gpu_device_memory = &system.Host1x().MemoryManager();
        }

    void FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
        system.GPU().FlushRegion(GetInteger(dest_addr), size);
        // Guard on sys_core;
        if (core == sys_core) [[unlikely]] {
            sys_core_guard.lock();
        }
        SCOPE_EXIT({
            if (core == sys_core) [[unlikely]] {
                sys_core_guard.unlock();
            }
        });
        auto& gpu = system.GPU();
        gpu_device_memory->ApplyOpOnPointer(
            p, scratch_buffers[core], [&](DAddr address) { gpu.InvalidateRegion(address, size); });
    }

    Core::System& system;
    Tegra::MaxwellDeviceMemoryManager* gpu_device_memory{};
    Common::PageTable* current_page_table = nullptr;
    std::array<VideoCore::RasterizerDownloadArea, Core::Hardware::NUM_CPU_CORES>
        rasterizer_read_areas{};
    std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{};
    std::array<Common::ScratchBuffer<u32>, Core::Hardware::NUM_CPU_CORES> scratch_buffers{};
    std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
    std::mutex sys_core_guard;

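A side note on the write path in the hunk above: GPU dirty tracking is coalesced per page, so the relatively expensive `OnCPUWrite` notification is only consulted when a write leaves the page remembered in `last_address`. The following is a simplified, standalone sketch of that idea; it is not yuzu's API, and the names `DirtyTracker`, `OnWrite`, and `kPageBits` are illustrative only.

```cpp
#include <cstddef>
#include <cstdint>

// Simplified illustration of per-page write coalescing (assumed names, not yuzu code).
struct DirtyTracker {
    static constexpr std::uint64_t kPageBits = 12; // stands in for YUZU_PAGEBITS
    std::uint64_t last_page = ~std::uint64_t{0};

    // notify only fires when the write touches a page other than the last one seen,
    // mirroring how HandleRasterizerWrite consults OnCPUWrite before collecting.
    template <typename NotifyFn>
    void OnWrite(std::uint64_t address, std::size_t size, NotifyFn&& notify) {
        const std::uint64_t page = address >> kPageBits;
        if (page != last_page) {
            notify(address, size);
            last_page = page;
        }
    }
};
```
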
@ -1059,14 +1079,6 @@ void Memory::MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug)
    impl->MarkRegionDebug(GetInteger(vaddr), size, debug);
}

void Memory::InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) {
    impl->InvalidateRegion(dest_addr, size);
}

void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
    impl->FlushRegion(dest_addr, size);
}

bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
    [[maybe_unused]] bool mapped = true;
    [[maybe_unused]] bool rasterizer = false;
@ -1078,10 +1090,10 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
                      GetInteger(vaddr));
            mapped = false;
        },
        [&] {
            impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size);
            rasterizer = true;
        });
        [&] { rasterizer = true; });
    if (rasterizer) {
        impl->InvalidateGPUMemory(ptr, size);
    }

#ifdef __linux__
    if (!rasterizer && mapped) {
@ -12,6 +12,7 @@

#include "common/scratch_buffer.h"
#include "common/typed_address.h"
#include "core/guest_memory.h"
#include "core/hle/result.h"

namespace Common {
@ -486,10 +487,10 @@ public:
    void MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug);

    void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);
    void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size);

    bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);

    bool InvalidateSeparateHeap(void* fault_address);
    void FlushRegion(Common::ProcessAddress dest_addr, size_t size);

private:
    Core::System& system;
@ -498,209 +499,9 @@ private:
    std::unique_ptr<Impl> impl;
};

enum GuestMemoryFlags : u32 {
    Read = 1 << 0,
    Write = 1 << 1,
    Safe = 1 << 2,
    Cached = 1 << 3,

    SafeRead = Read | Safe,
    SafeWrite = Write | Safe,
    SafeReadWrite = SafeRead | SafeWrite,
    SafeReadCachedWrite = SafeReadWrite | Cached,

    UnsafeRead = Read,
    UnsafeWrite = Write,
    UnsafeReadWrite = UnsafeRead | UnsafeWrite,
    UnsafeReadCachedWrite = UnsafeReadWrite | Cached,
};

namespace {
template <typename M, typename T, GuestMemoryFlags FLAGS>
class GuestMemory {
    using iterator = T*;
    using const_iterator = const T*;
    using value_type = T;
    using element_type = T;
    using iterator_category = std::contiguous_iterator_tag;

public:
    GuestMemory() = delete;
    explicit GuestMemory(M& memory, u64 addr, std::size_t size,
                         Common::ScratchBuffer<T>* backup = nullptr)
        : m_memory{memory}, m_addr{addr}, m_size{size} {
        static_assert(FLAGS & GuestMemoryFlags::Read || FLAGS & GuestMemoryFlags::Write);
        if constexpr (FLAGS & GuestMemoryFlags::Read) {
            Read(addr, size, backup);
        }
    }

    ~GuestMemory() = default;

    T* data() noexcept {
        return m_data_span.data();
    }

    const T* data() const noexcept {
        return m_data_span.data();
    }

    size_t size() const noexcept {
        return m_size;
    }

    size_t size_bytes() const noexcept {
        return this->size() * sizeof(T);
    }

    [[nodiscard]] T* begin() noexcept {
        return this->data();
    }

    [[nodiscard]] const T* begin() const noexcept {
        return this->data();
    }

    [[nodiscard]] T* end() noexcept {
        return this->data() + this->size();
    }

    [[nodiscard]] const T* end() const noexcept {
        return this->data() + this->size();
    }

    T& operator[](size_t index) noexcept {
        return m_data_span[index];
    }

    const T& operator[](size_t index) const noexcept {
        return m_data_span[index];
    }

    void SetAddressAndSize(u64 addr, std::size_t size) noexcept {
        m_addr = addr;
        m_size = size;
        m_addr_changed = true;
    }

    std::span<T> Read(u64 addr, std::size_t size,
                      Common::ScratchBuffer<T>* backup = nullptr) noexcept {
        m_addr = addr;
        m_size = size;
        if (m_size == 0) {
            m_is_data_copy = true;
            return {};
        }

        if (this->TrySetSpan()) {
            if constexpr (FLAGS & GuestMemoryFlags::Safe) {
                m_memory.FlushRegion(m_addr, this->size_bytes());
            }
        } else {
            if (backup) {
                backup->resize_destructive(this->size());
                m_data_span = *backup;
            } else {
                m_data_copy.resize(this->size());
                m_data_span = std::span(m_data_copy);
            }
            m_is_data_copy = true;
            m_span_valid = true;
            if constexpr (FLAGS & GuestMemoryFlags::Safe) {
                m_memory.ReadBlock(m_addr, this->data(), this->size_bytes());
            } else {
                m_memory.ReadBlockUnsafe(m_addr, this->data(), this->size_bytes());
            }
        }
        return m_data_span;
    }

    void Write(std::span<T> write_data) noexcept {
        if constexpr (FLAGS & GuestMemoryFlags::Cached) {
            m_memory.WriteBlockCached(m_addr, write_data.data(), this->size_bytes());
        } else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
            m_memory.WriteBlock(m_addr, write_data.data(), this->size_bytes());
        } else {
            m_memory.WriteBlockUnsafe(m_addr, write_data.data(), this->size_bytes());
        }
    }

    bool TrySetSpan() noexcept {
        if (u8* ptr = m_memory.GetSpan(m_addr, this->size_bytes()); ptr) {
            m_data_span = {reinterpret_cast<T*>(ptr), this->size()};
            m_span_valid = true;
            return true;
        }
        return false;
    }

protected:
    bool IsDataCopy() const noexcept {
        return m_is_data_copy;
    }

    bool AddressChanged() const noexcept {
        return m_addr_changed;
    }

    M& m_memory;
    u64 m_addr{};
    size_t m_size{};
    std::span<T> m_data_span{};
    std::vector<T> m_data_copy{};
    bool m_span_valid{false};
    bool m_is_data_copy{false};
    bool m_addr_changed{false};
};

template <typename M, typename T, GuestMemoryFlags FLAGS>
class GuestMemoryScoped : public GuestMemory<M, T, FLAGS> {
public:
    GuestMemoryScoped() = delete;
    explicit GuestMemoryScoped(M& memory, u64 addr, std::size_t size,
                               Common::ScratchBuffer<T>* backup = nullptr)
        : GuestMemory<M, T, FLAGS>(memory, addr, size, backup) {
        if constexpr (!(FLAGS & GuestMemoryFlags::Read)) {
            if (!this->TrySetSpan()) {
                if (backup) {
                    this->m_data_span = *backup;
                    this->m_span_valid = true;
                    this->m_is_data_copy = true;
                }
            }
        }
    }

    ~GuestMemoryScoped() {
        if constexpr (FLAGS & GuestMemoryFlags::Write) {
            if (this->size() == 0) [[unlikely]] {
                return;
            }

            if (this->AddressChanged() || this->IsDataCopy()) {
                ASSERT(this->m_span_valid);
                if constexpr (FLAGS & GuestMemoryFlags::Cached) {
                    this->m_memory.WriteBlockCached(this->m_addr, this->data(), this->size_bytes());
                } else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
                    this->m_memory.WriteBlock(this->m_addr, this->data(), this->size_bytes());
                } else {
                    this->m_memory.WriteBlockUnsafe(this->m_addr, this->data(), this->size_bytes());
                }
            } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) ||
                                 (FLAGS & GuestMemoryFlags::Cached)) {
                this->m_memory.InvalidateRegion(this->m_addr, this->size_bytes());
            }
        }
    }
};
} // namespace

template <typename T, GuestMemoryFlags FLAGS>
using CpuGuestMemory = GuestMemory<Memory, T, FLAGS>;
using CpuGuestMemory = GuestMemory<Core::Memory::Memory, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using CpuGuestMemoryScoped = GuestMemoryScoped<Memory, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using GpuGuestMemory = GuestMemory<Tegra::MemoryManager, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using GpuGuestMemoryScoped = GuestMemoryScoped<Tegra::MemoryManager, T, FLAGS>;
using CpuGuestMemoryScoped = GuestMemoryScoped<Core::Memory::Memory, T, FLAGS>;

} // namespace Core::Memory
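For orientation, here is a hypothetical usage sketch of the `CpuGuestMemory` alias declared at the end of the hunk above. It is not code from the repository; the function name, the `memory` reference, and the address arguments are assumptions. With `SafeRead`, the constructor either maps the guest range directly (flushing it first) or reads it into a copy, and the object can then be iterated like a span.

```cpp
// Hypothetical example only: summing guest words through Core::Memory::CpuGuestMemory.
// Assumes the declarations above are visible and yuzu's common integer typedefs (u32/u64).
u64 SumGuestWords(Core::Memory::Memory& memory, u64 guest_addr, std::size_t count) {
    Core::Memory::CpuGuestMemory<u32, Core::Memory::GuestMemoryFlags::SafeRead> words(
        memory, guest_addr, count);
    u64 sum = 0;
    for (const u32 word : words) {
        sum += word;
    }
    return sum;
}
```

The scoped variant (`CpuGuestMemoryScoped`) additionally writes the span back, or invalidates the region, when it goes out of scope, as its destructor above shows.
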
@ -65,6 +65,14 @@ void WriteStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value&
    WriteStorage(ctx, binding, offset, value, ctx.storage_types.U32, sizeof(u32),
                 &StorageDefinitions::U32, index_offset);
}

void WriteStorageByCasLoop(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                           Id value, Id bit_offset, Id bit_count) {
    const Id pointer{StoragePointer(ctx, binding, offset, ctx.storage_types.U32, sizeof(u32),
                                    &StorageDefinitions::U32)};
    ctx.OpFunctionCall(ctx.TypeVoid(), ctx.write_storage_cas_loop_func, pointer, value, bit_offset,
                       bit_count);
}
} // Anonymous namespace

void EmitLoadGlobalU8(EmitContext&) {
@ -219,26 +227,42 @@ Id EmitLoadStorage128(EmitContext& ctx, const IR::Value& binding, const IR::Valu

void EmitWriteStorageU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                        Id value) {
    if (ctx.profile.support_int8) {
        WriteStorage(ctx, binding, offset, ctx.OpSConvert(ctx.U8, value), ctx.storage_types.U8,
                     sizeof(u8), &StorageDefinitions::U8);
    } else {
        WriteStorageByCasLoop(ctx, binding, offset, value, ctx.BitOffset8(offset), ctx.Const(8u));
    }
}

void EmitWriteStorageS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                        Id value) {
    if (ctx.profile.support_int8) {
        WriteStorage(ctx, binding, offset, ctx.OpSConvert(ctx.S8, value), ctx.storage_types.S8,
                     sizeof(s8), &StorageDefinitions::S8);
    } else {
        WriteStorageByCasLoop(ctx, binding, offset, value, ctx.BitOffset8(offset), ctx.Const(8u));
    }
}

void EmitWriteStorageU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                         Id value) {
    if (ctx.profile.support_int16) {
        WriteStorage(ctx, binding, offset, ctx.OpSConvert(ctx.U16, value), ctx.storage_types.U16,
                     sizeof(u16), &StorageDefinitions::U16);
    } else {
        WriteStorageByCasLoop(ctx, binding, offset, value, ctx.BitOffset16(offset), ctx.Const(16u));
    }
}

void EmitWriteStorageS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                         Id value) {
    if (ctx.profile.support_int16) {
        WriteStorage(ctx, binding, offset, ctx.OpSConvert(ctx.S16, value), ctx.storage_types.S16,
                     sizeof(s16), &StorageDefinitions::S16);
    } else {
        WriteStorageByCasLoop(ctx, binding, offset, value, ctx.BitOffset16(offset), ctx.Const(16u));
    }
}

void EmitWriteStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
@ -480,6 +480,7 @@ EmitContext::EmitContext(const Profile& profile_, const RuntimeInfo& runtime_inf
    DefineTextures(program.info, texture_binding, bindings.texture_scaling_index);
    DefineImages(program.info, image_binding, bindings.image_scaling_index);
    DefineAttributeMemAccess(program.info);
    DefineWriteStorageCasLoopFunction(program.info);
    DefineGlobalMemoryFunctions(program.info);
    DefineRescalingInput(program.info);
    DefineRenderArea(program.info);
@ -877,6 +878,56 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
    }
}

void EmitContext::DefineWriteStorageCasLoopFunction(const Info& info) {
    if (profile.support_int8 && profile.support_int16) {
        return;
    }
    if (!info.uses_int8 && !info.uses_int16) {
        return;
    }

    AddCapability(spv::Capability::VariablePointersStorageBuffer);

    const Id ptr_type{TypePointer(spv::StorageClass::StorageBuffer, U32[1])};
    const Id func_type{TypeFunction(void_id, ptr_type, U32[1], U32[1], U32[1])};
    const Id func{OpFunction(void_id, spv::FunctionControlMask::MaskNone, func_type)};
    const Id pointer{OpFunctionParameter(ptr_type)};
    const Id value{OpFunctionParameter(U32[1])};
    const Id bit_offset{OpFunctionParameter(U32[1])};
    const Id bit_count{OpFunctionParameter(U32[1])};

    AddLabel();
    const Id scope_device{Const(1u)};
    const Id ordering_relaxed{u32_zero_value};
    const Id body_label{OpLabel()};
    const Id continue_label{OpLabel()};
    const Id endloop_label{OpLabel()};
    const Id beginloop_label{OpLabel()};
    OpBranch(beginloop_label);

    AddLabel(beginloop_label);
    OpLoopMerge(endloop_label, continue_label, spv::LoopControlMask::MaskNone);
    OpBranch(body_label);

    AddLabel(body_label);
    const Id expected_value{OpLoad(U32[1], pointer)};
    const Id desired_value{OpBitFieldInsert(U32[1], expected_value, value, bit_offset, bit_count)};
    const Id actual_value{OpAtomicCompareExchange(U32[1], pointer, scope_device, ordering_relaxed,
                                                  ordering_relaxed, desired_value, expected_value)};
    const Id store_successful{OpIEqual(U1, expected_value, actual_value)};
    OpBranchConditional(store_successful, endloop_label, continue_label);

    AddLabel(endloop_label);
    OpReturn();

    AddLabel(continue_label);
    OpBranch(beginloop_label);

    OpFunctionEnd();

    write_storage_cas_loop_func = func;
}

void EmitContext::DefineGlobalMemoryFunctions(const Info& info) {
    if (!info.uses_global_memory || !profile.support_int64) {
        return;
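The SPIR-V helper above emulates narrow stores by read-modify-writing the containing 32-bit word until an atomic compare-exchange succeeds. As a rough host-side analogue (a sketch only, not code from the repository; the function and parameter names are made up), the same idea looks like this in C++:

```cpp
#include <atomic>
#include <cstdint>

// Emulate storing `bit_count` bits of `value` at `bit_offset` inside a 32-bit word,
// without disturbing the neighbouring bits, using a CAS retry loop.
void WriteBitsByCasLoop(std::atomic<std::uint32_t>& word, std::uint32_t value,
                        std::uint32_t bit_offset, std::uint32_t bit_count) {
    std::uint32_t expected = word.load(std::memory_order_relaxed);
    for (;;) {
        // Equivalent of OpBitFieldInsert: splice `value` into the loaded word.
        const std::uint32_t mask =
            ((bit_count < 32 ? (1u << bit_count) : 0u) - 1u) << bit_offset;
        const std::uint32_t desired = (expected & ~mask) | ((value << bit_offset) & mask);
        // Equivalent of OpAtomicCompareExchange with relaxed ordering.
        if (word.compare_exchange_weak(expected, desired, std::memory_order_relaxed)) {
            return; // the insert landed without clobbering the rest of the word
        }
        // On failure `expected` was refreshed with the current value; retry.
    }
}
```

In the emitted SPIR-V, this retry loop corresponds to the beginloop/body/continue/endloop blocks, with `OpLoopMerge` declaring the merge and continue targets.
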
Some files were not shown because too many files have changed in this diff.