Mirror of https://github.com/yuzu-emu/yuzu-android (synced 2025-06-07 21:50:57 -07:00)

Compare commits: master...android-23 (10 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 95c74faa10 | |
| | 2ec3355348 | |
| | 8c277880d8 | |
| | 9e1e33bcca | |
| | 8e3103ceaf | |
| | 5dca9417a2 | |
| | 2cca283210 | |
| | d90b525fc0 | |
| | ea3e76c7f4 | |
| | d23ae34ecc | |

README.md (17 lines changed)
@@ -1,3 +1,20 @@
| Pull Request | Commit | Title | Author | Merged? |
|----|----|----|----|----|
| [12461](https://github.com/yuzu-emu/yuzu-android//pull/12461) | [`4c08a0e6d`](https://github.com/yuzu-emu/yuzu-android//pull/12461/files) | Rework Nvdec and VIC to fix out-of-order videos, and speed up decoding. | [Kelebek1](https://github.com/Kelebek1/) | Yes |
| [12749](https://github.com/yuzu-emu/yuzu-android//pull/12749) | [`aad4b0d6f`](https://github.com/yuzu-emu/yuzu-android//pull/12749/files) | general: workarounds for SMMU syncing issues | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12756](https://github.com/yuzu-emu/yuzu-android//pull/12756) | [`cd3de0848`](https://github.com/yuzu-emu/yuzu-android//pull/12756/files) | general: applet multiprocess | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12873](https://github.com/yuzu-emu/yuzu-android//pull/12873) | [`023c3aa65`](https://github.com/yuzu-emu/yuzu-android//pull/12873/files) | GPU: Implement channel scheduling. | [FernandoS27](https://github.com/FernandoS27/) | Yes |
| [12903](https://github.com/yuzu-emu/yuzu-android//pull/12903) | [`f296a9ce9`](https://github.com/yuzu-emu/yuzu-android//pull/12903/files) | shader_recompiler: use only ConstOffset for OpImageFetch | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12905](https://github.com/yuzu-emu/yuzu-android//pull/12905) | [`5eb5c9675`](https://github.com/yuzu-emu/yuzu-android//pull/12905/files) | nvnflinger: release buffers before presentation sleep | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12914](https://github.com/yuzu-emu/yuzu-android//pull/12914) | [`3a6d8ae2c`](https://github.com/yuzu-emu/yuzu-android//pull/12914/files) | VideoCore Refactor Part 1. | [FernandoS27](https://github.com/FernandoS27/) | Yes |
| [12915](https://github.com/yuzu-emu/yuzu-android//pull/12915) | [`504abbd6e`](https://github.com/yuzu-emu/yuzu-android//pull/12915/files) | dmnt: cheats: Update cheat vm to latest version | [german77](https://github.com/german77/) | Yes |
| [12920](https://github.com/yuzu-emu/yuzu-android//pull/12920) | [`62fc6d5c3`](https://github.com/yuzu-emu/yuzu-android//pull/12920/files) | android: Move JNI setup and helpers to common | [t895](https://github.com/t895/) | Yes |

End of merge log. You can find the original README.md below the break.

-----

<!--
SPDX-FileCopyrightText: 2018 yuzu Emulator Project
SPDX-License-Identifier: GPL-2.0-or-later
@@ -164,6 +164,7 @@ else()

if (MINGW)
add_definitions(-DMINGW_HAS_SECURE_API)
add_compile_options("-msse4.1")

if (MINGW_STATIC_BUILD)
add_definitions(-DQT_STATICPLUGIN)
@@ -2,14 +2,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later

add_library(yuzu-android SHARED
android_common/android_common.cpp
android_common/android_common.h
applets/software_keyboard.cpp
applets/software_keyboard.h
emu_window/emu_window.cpp
emu_window/emu_window.h
id_cache.cpp
id_cache.h
native.cpp
native.h
native_config.cpp
@@ -3,6 +3,7 @@

#include <android/native_window_jni.h>

#include "common/android/id_cache.h"
#include "common/logging/log.h"
#include "input_common/drivers/touch_screen.h"
#include "input_common/drivers/virtual_amiibo.h"
@@ -60,7 +61,8 @@ void EmuWindow_Android::OnRemoveNfcTag() {

void EmuWindow_Android::OnFrameDisplayed() {
if (!m_first_frame) {
EmulationSession::GetInstance().OnEmulationStarted();
Common::Android::RunJNIOnFiber<void>(
[&](JNIEnv* env) { EmulationSession::GetInstance().OnEmulationStarted(); });
m_first_frame = true;
}
}
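The OnFrameDisplayed hunk above replaces a direct JNI notification with Common::Android::RunJNIOnFiber, whose definition appears later in this diff. The apparent motivation is that this callback runs on fiber-backed emulation code, and JNI may only be used from an OS thread that is attached to the JVM. Below is a minimal sketch of that thread-pivot idea; g_vm and RunOnAttachedThread are illustrative names only, and the real helper resolves the environment through Common::Android::GetEnvForThread() instead of attaching by hand.

#include <jni.h>
#include <functional>
#include <thread>

extern JavaVM* g_vm; // hypothetical: a JavaVM* cached during JNI_OnLoad

void RunOnAttachedThread(const std::function<void(JNIEnv*)>& work) {
    std::thread([&] {
        JNIEnv* env = nullptr;
        // Attach this fresh thread to the JVM before touching any JNI.
        if (g_vm->AttachCurrentThread(&env, nullptr) != JNI_OK) {
            return; // could not attach; nothing to do
        }
        work(env);                   // run the JNI call on the attached thread
        g_vm->DetachCurrentThread(); // balance the attach before the thread exits
    }).join(); // block the caller, mirroring the synchronous call it replaces
}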
@@ -1,13 +1,12 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/android/android_common.h"
#include "core/core.h"
#include "core/file_sys/fs_filesystem.h"
#include "core/file_sys/patch_manager.h"
#include "core/loader/loader.h"
#include "core/loader/nro.h"
#include "jni.h"
#include "jni/android_common/android_common.h"
#include "native.h"

struct RomMetadata {
@@ -79,7 +78,7 @@ extern "C" {
jboolean Java_org_yuzu_yuzu_1emu_utils_GameMetadata_getIsValid(JNIEnv* env, jobject obj,
jstring jpath) {
const auto file = EmulationSession::GetInstance().System().GetFilesystem()->OpenFile(
GetJString(env, jpath), FileSys::OpenMode::Read);
Common::Android::GetJString(env, jpath), FileSys::OpenMode::Read);
if (!file) {
return false;
}
@@ -104,27 +103,31 @@ jboolean Java_org_yuzu_yuzu_1emu_utils_GameMetadata_getIsValid(JNIEnv* env, jobj

jstring Java_org_yuzu_yuzu_1emu_utils_GameMetadata_getTitle(JNIEnv* env, jobject obj,
jstring jpath) {
return ToJString(env, GetRomMetadata(GetJString(env, jpath)).title);
return Common::Android::ToJString(
env, GetRomMetadata(Common::Android::GetJString(env, jpath)).title);
}

jstring Java_org_yuzu_yuzu_1emu_utils_GameMetadata_getProgramId(JNIEnv* env, jobject obj,
jstring jpath) {
return ToJString(env, std::to_string(GetRomMetadata(GetJString(env, jpath)).programId));
return Common::Android::ToJString(
env, std::to_string(GetRomMetadata(Common::Android::GetJString(env, jpath)).programId));
}

jstring Java_org_yuzu_yuzu_1emu_utils_GameMetadata_getDeveloper(JNIEnv* env, jobject obj,
jstring jpath) {
return ToJString(env, GetRomMetadata(GetJString(env, jpath)).developer);
return Common::Android::ToJString(
env, GetRomMetadata(Common::Android::GetJString(env, jpath)).developer);
}

jstring Java_org_yuzu_yuzu_1emu_utils_GameMetadata_getVersion(JNIEnv* env, jobject obj,
jstring jpath, jboolean jreload) {
return ToJString(env, GetRomMetadata(GetJString(env, jpath), jreload).version);
return Common::Android::ToJString(
env, GetRomMetadata(Common::Android::GetJString(env, jpath), jreload).version);
}

jbyteArray Java_org_yuzu_yuzu_1emu_utils_GameMetadata_getIcon(JNIEnv* env, jobject obj,
jstring jpath) {
auto icon_data = GetRomMetadata(GetJString(env, jpath)).icon;
auto icon_data = GetRomMetadata(Common::Android::GetJString(env, jpath)).icon;
jbyteArray icon = env->NewByteArray(static_cast<jsize>(icon_data.size()));
env->SetByteArrayRegion(icon, 0, env->GetArrayLength(icon),
reinterpret_cast<jbyte*>(icon_data.data()));
@@ -133,7 +136,8 @@ jbyteArray Java_org_yuzu_yuzu_1emu_utils_GameMetadata_getIcon(JNIEnv* env, jobje

jboolean Java_org_yuzu_yuzu_1emu_utils_GameMetadata_getIsHomebrew(JNIEnv* env, jobject obj,
jstring jpath) {
return static_cast<jboolean>(GetRomMetadata(GetJString(env, jpath)).isHomebrew);
return static_cast<jboolean>(
GetRomMetadata(Common::Android::GetJString(env, jpath)).isHomebrew);
}

void Java_org_yuzu_yuzu_1emu_utils_GameMetadata_resetMetadata(JNIEnv* env, jobject obj) {
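The GameMetadata bridges above all follow one conversion pattern: unwrap the incoming jstring with Common::Android::GetJString, perform the native lookup, and wrap the result with Common::Android::ToJString. A small self-contained sketch of that pattern follows; RomInfo, GetRomInfo, and the Java_org_example_Bridge_getPublisher entry point are hypothetical names used only for illustration and are not part of this diff.

#include <jni.h>
#include <string>

#include "common/android/android_common.h"

struct RomInfo {              // illustrative stand-in for RomMetadata
    std::string publisher;
};

// Illustrative lookup; a real implementation would read the ROM's control data.
static RomInfo GetRomInfo(const std::string& path) {
    return RomInfo{"unknown (" + path + ")"};
}

extern "C" jstring Java_org_example_Bridge_getPublisher(JNIEnv* env, jobject obj,
                                                        jstring jpath) {
    // jstring -> std::string, native work, then std::string -> jstring.
    const std::string path = Common::Android::GetJString(env, jpath);
    return Common::Android::ToJString(env, GetRomInfo(path).publisher);
}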
@ -20,6 +20,8 @@
|
||||
#include <frontend_common/content_manager.h>
|
||||
#include <jni.h>
|
||||
|
||||
#include "common/android/android_common.h"
|
||||
#include "common/android/id_cache.h"
|
||||
#include "common/detached_tasks.h"
|
||||
#include "common/dynamic_library.h"
|
||||
#include "common/fs/path_util.h"
|
||||
@ -57,8 +59,6 @@
|
||||
#include "hid_core/frontend/emulated_controller.h"
|
||||
#include "hid_core/hid_core.h"
|
||||
#include "hid_core/hid_types.h"
|
||||
#include "jni/android_common/android_common.h"
|
||||
#include "jni/id_cache.h"
|
||||
#include "jni/native.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
#include "video_core/renderer_vulkan/renderer_vulkan.h"
|
||||
@ -228,7 +228,7 @@ Core::SystemResultStatus EmulationSession::InitializeEmulation(const std::string
|
||||
std::make_unique<EmuWindow_Android>(&m_input_subsystem, m_native_window, m_vulkan_library);
|
||||
|
||||
// Initialize system.
|
||||
jauto android_keyboard = std::make_unique<SoftwareKeyboard::AndroidKeyboard>();
|
||||
jauto android_keyboard = std::make_unique<Common::Android::SoftwareKeyboard::AndroidKeyboard>();
|
||||
m_software_keyboard = android_keyboard.get();
|
||||
m_system.SetShuttingDown(false);
|
||||
m_system.ApplySettings();
|
||||
@ -411,37 +411,39 @@ void EmulationSession::OnGamepadDisconnectEvent([[maybe_unused]] int index) {
|
||||
controller->Disconnect();
|
||||
}
|
||||
|
||||
SoftwareKeyboard::AndroidKeyboard* EmulationSession::SoftwareKeyboard() {
|
||||
Common::Android::SoftwareKeyboard::AndroidKeyboard* EmulationSession::SoftwareKeyboard() {
|
||||
return m_software_keyboard;
|
||||
}
|
||||
|
||||
void EmulationSession::LoadDiskCacheProgress(VideoCore::LoadCallbackStage stage, int progress,
|
||||
int max) {
|
||||
JNIEnv* env = IDCache::GetEnvForThread();
|
||||
env->CallStaticVoidMethod(IDCache::GetDiskCacheProgressClass(),
|
||||
IDCache::GetDiskCacheLoadProgress(), static_cast<jint>(stage),
|
||||
JNIEnv* env = Common::Android::GetEnvForThread();
|
||||
env->CallStaticVoidMethod(Common::Android::GetDiskCacheProgressClass(),
|
||||
Common::Android::GetDiskCacheLoadProgress(), static_cast<jint>(stage),
|
||||
static_cast<jint>(progress), static_cast<jint>(max));
|
||||
}
|
||||
|
||||
void EmulationSession::OnEmulationStarted() {
|
||||
JNIEnv* env = IDCache::GetEnvForThread();
|
||||
env->CallStaticVoidMethod(IDCache::GetNativeLibraryClass(), IDCache::GetOnEmulationStarted());
|
||||
JNIEnv* env = Common::Android::GetEnvForThread();
|
||||
env->CallStaticVoidMethod(Common::Android::GetNativeLibraryClass(),
|
||||
Common::Android::GetOnEmulationStarted());
|
||||
}
|
||||
|
||||
void EmulationSession::OnEmulationStopped(Core::SystemResultStatus result) {
|
||||
JNIEnv* env = IDCache::GetEnvForThread();
|
||||
env->CallStaticVoidMethod(IDCache::GetNativeLibraryClass(), IDCache::GetOnEmulationStopped(),
|
||||
static_cast<jint>(result));
|
||||
JNIEnv* env = Common::Android::GetEnvForThread();
|
||||
env->CallStaticVoidMethod(Common::Android::GetNativeLibraryClass(),
|
||||
Common::Android::GetOnEmulationStopped(), static_cast<jint>(result));
|
||||
}
|
||||
|
||||
void EmulationSession::ChangeProgram(std::size_t program_index) {
|
||||
JNIEnv* env = IDCache::GetEnvForThread();
|
||||
env->CallStaticVoidMethod(IDCache::GetNativeLibraryClass(), IDCache::GetOnProgramChanged(),
|
||||
JNIEnv* env = Common::Android::GetEnvForThread();
|
||||
env->CallStaticVoidMethod(Common::Android::GetNativeLibraryClass(),
|
||||
Common::Android::GetOnProgramChanged(),
|
||||
static_cast<jint>(program_index));
|
||||
}
|
||||
|
||||
u64 EmulationSession::GetProgramId(JNIEnv* env, jstring jprogramId) {
|
||||
auto program_id_string = GetJString(env, jprogramId);
|
||||
auto program_id_string = Common::Android::GetJString(env, jprogramId);
|
||||
try {
|
||||
return std::stoull(program_id_string);
|
||||
} catch (...) {
|
||||
@ -491,7 +493,7 @@ void Java_org_yuzu_yuzu_1emu_NativeLibrary_surfaceDestroyed(JNIEnv* env, jobject
|
||||
|
||||
void Java_org_yuzu_yuzu_1emu_NativeLibrary_setAppDirectory(JNIEnv* env, jobject instance,
|
||||
[[maybe_unused]] jstring j_directory) {
|
||||
Common::FS::SetAppDirectory(GetJString(env, j_directory));
|
||||
Common::FS::SetAppDirectory(Common::Android::GetJString(env, j_directory));
|
||||
}
|
||||
|
||||
int Java_org_yuzu_yuzu_1emu_NativeLibrary_installFileToNand(JNIEnv* env, jobject instance,
|
||||
@ -501,21 +503,22 @@ int Java_org_yuzu_yuzu_1emu_NativeLibrary_installFileToNand(JNIEnv* env, jobject
|
||||
jlambdaClass, "invoke", "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;");
|
||||
const auto callback = [env, jcallback, jlambdaInvokeMethod](size_t max, size_t progress) {
|
||||
auto jwasCancelled = env->CallObjectMethod(jcallback, jlambdaInvokeMethod,
|
||||
ToJDouble(env, max), ToJDouble(env, progress));
|
||||
return GetJBoolean(env, jwasCancelled);
|
||||
Common::Android::ToJDouble(env, max),
|
||||
Common::Android::ToJDouble(env, progress));
|
||||
return Common::Android::GetJBoolean(env, jwasCancelled);
|
||||
};
|
||||
|
||||
return static_cast<int>(
|
||||
ContentManager::InstallNSP(EmulationSession::GetInstance().System(),
|
||||
*EmulationSession::GetInstance().System().GetFilesystem(),
|
||||
GetJString(env, j_file), callback));
|
||||
Common::Android::GetJString(env, j_file), callback));
|
||||
}
|
||||
|
||||
jboolean Java_org_yuzu_yuzu_1emu_NativeLibrary_doesUpdateMatchProgram(JNIEnv* env, jobject jobj,
|
||||
jstring jprogramId,
|
||||
jstring jupdatePath) {
|
||||
u64 program_id = EmulationSession::GetProgramId(env, jprogramId);
|
||||
std::string updatePath = GetJString(env, jupdatePath);
|
||||
std::string updatePath = Common::Android::GetJString(env, jupdatePath);
|
||||
std::shared_ptr<FileSys::NSP> nsp = std::make_shared<FileSys::NSP>(
|
||||
EmulationSession::GetInstance().System().GetFilesystem()->OpenFile(
|
||||
updatePath, FileSys::OpenMode::Read));
|
||||
@ -538,8 +541,10 @@ void JNICALL Java_org_yuzu_yuzu_1emu_NativeLibrary_initializeGpuDriver(JNIEnv* e
|
||||
jstring custom_driver_name,
|
||||
jstring file_redirect_dir) {
|
||||
EmulationSession::GetInstance().InitializeGpuDriver(
|
||||
GetJString(env, hook_lib_dir), GetJString(env, custom_driver_dir),
|
||||
GetJString(env, custom_driver_name), GetJString(env, file_redirect_dir));
|
||||
Common::Android::GetJString(env, hook_lib_dir),
|
||||
Common::Android::GetJString(env, custom_driver_dir),
|
||||
Common::Android::GetJString(env, custom_driver_name),
|
||||
Common::Android::GetJString(env, file_redirect_dir));
|
||||
}
|
||||
|
||||
[[maybe_unused]] static bool CheckKgslPresent() {
|
||||
@ -566,7 +571,7 @@ jobjectArray Java_org_yuzu_yuzu_1emu_utils_GpuDriverHelper_getSystemDriverInfo(
|
||||
JNIEnv* env, jobject j_obj, jobject j_surf, jstring j_hook_lib_dir) {
|
||||
const char* file_redirect_dir_{};
|
||||
int featureFlags{};
|
||||
std::string hook_lib_dir = GetJString(env, j_hook_lib_dir);
|
||||
std::string hook_lib_dir = Common::Android::GetJString(env, j_hook_lib_dir);
|
||||
auto handle = adrenotools_open_libvulkan(RTLD_NOW, featureFlags, nullptr, hook_lib_dir.c_str(),
|
||||
nullptr, nullptr, file_redirect_dir_, nullptr);
|
||||
auto driver_library = std::make_shared<Common::DynamicLibrary>(handle);
|
||||
@ -587,9 +592,10 @@ jobjectArray Java_org_yuzu_yuzu_1emu_utils_GpuDriverHelper_getSystemDriverInfo(
|
||||
fmt::format("{}.{}.{}", VK_API_VERSION_MAJOR(driver_version),
|
||||
VK_API_VERSION_MINOR(driver_version), VK_API_VERSION_PATCH(driver_version));
|
||||
|
||||
jobjectArray j_driver_info =
|
||||
env->NewObjectArray(2, IDCache::GetStringClass(), ToJString(env, version_string));
|
||||
env->SetObjectArrayElement(j_driver_info, 1, ToJString(env, device.GetDriverName()));
|
||||
jobjectArray j_driver_info = env->NewObjectArray(
|
||||
2, Common::Android::GetStringClass(), Common::Android::ToJString(env, version_string));
|
||||
env->SetObjectArrayElement(j_driver_info, 1,
|
||||
Common::Android::ToJString(env, device.GetDriverName()));
|
||||
return j_driver_info;
|
||||
}
|
||||
|
||||
@ -742,15 +748,15 @@ jdoubleArray Java_org_yuzu_yuzu_1emu_NativeLibrary_getPerfStats(JNIEnv* env, jcl
|
||||
|
||||
jstring Java_org_yuzu_yuzu_1emu_NativeLibrary_getCpuBackend(JNIEnv* env, jclass clazz) {
|
||||
if (Settings::IsNceEnabled()) {
|
||||
return ToJString(env, "NCE");
|
||||
return Common::Android::ToJString(env, "NCE");
|
||||
}
|
||||
|
||||
return ToJString(env, "JIT");
|
||||
return Common::Android::ToJString(env, "JIT");
|
||||
}
|
||||
|
||||
jstring Java_org_yuzu_yuzu_1emu_NativeLibrary_getGpuDriver(JNIEnv* env, jobject jobj) {
|
||||
return ToJString(env,
|
||||
EmulationSession::GetInstance().System().GPU().Renderer().GetDeviceVendor());
|
||||
return Common::Android::ToJString(
|
||||
env, EmulationSession::GetInstance().System().GPU().Renderer().GetDeviceVendor());
|
||||
}
|
||||
|
||||
void Java_org_yuzu_yuzu_1emu_NativeLibrary_applySettings(JNIEnv* env, jobject jobj) {
|
||||
@ -764,13 +770,14 @@ void Java_org_yuzu_yuzu_1emu_NativeLibrary_logSettings(JNIEnv* env, jobject jobj
|
||||
void Java_org_yuzu_yuzu_1emu_NativeLibrary_run(JNIEnv* env, jobject jobj, jstring j_path,
|
||||
jint j_program_index,
|
||||
jboolean j_frontend_initiated) {
|
||||
const std::string path = GetJString(env, j_path);
|
||||
const std::string path = Common::Android::GetJString(env, j_path);
|
||||
|
||||
const Core::SystemResultStatus result{
|
||||
RunEmulation(path, j_program_index, j_frontend_initiated)};
|
||||
if (result != Core::SystemResultStatus::Success) {
|
||||
env->CallStaticVoidMethod(IDCache::GetNativeLibraryClass(),
|
||||
IDCache::GetExitEmulationActivity(), static_cast<int>(result));
|
||||
env->CallStaticVoidMethod(Common::Android::GetNativeLibraryClass(),
|
||||
Common::Android::GetExitEmulationActivity(),
|
||||
static_cast<int>(result));
|
||||
}
|
||||
}
|
||||
|
||||
@ -781,7 +788,7 @@ void Java_org_yuzu_yuzu_1emu_NativeLibrary_logDeviceInfo(JNIEnv* env, jclass cla
|
||||
|
||||
void Java_org_yuzu_yuzu_1emu_NativeLibrary_submitInlineKeyboardText(JNIEnv* env, jclass clazz,
|
||||
jstring j_text) {
|
||||
const std::u16string input = Common::UTF8ToUTF16(GetJString(env, j_text));
|
||||
const std::u16string input = Common::UTF8ToUTF16(Common::Android::GetJString(env, j_text));
|
||||
EmulationSession::GetInstance().SoftwareKeyboard()->SubmitInlineKeyboardText(input);
|
||||
}
|
||||
|
||||
@ -815,16 +822,16 @@ jstring Java_org_yuzu_yuzu_1emu_NativeLibrary_getAppletLaunchPath(JNIEnv* env, j
|
||||
auto bis_system =
|
||||
EmulationSession::GetInstance().System().GetFileSystemController().GetSystemNANDContents();
|
||||
if (!bis_system) {
|
||||
return ToJString(env, "");
|
||||
return Common::Android::ToJString(env, "");
|
||||
}
|
||||
|
||||
auto applet_nca =
|
||||
bis_system->GetEntry(static_cast<u64>(jid), FileSys::ContentRecordType::Program);
|
||||
if (!applet_nca) {
|
||||
return ToJString(env, "");
|
||||
return Common::Android::ToJString(env, "");
|
||||
}
|
||||
|
||||
return ToJString(env, applet_nca->GetFullPath());
|
||||
return Common::Android::ToJString(env, applet_nca->GetFullPath());
|
||||
}
|
||||
|
||||
void Java_org_yuzu_yuzu_1emu_NativeLibrary_setCurrentAppletId(JNIEnv* env, jclass clazz,
|
||||
@ -857,7 +864,7 @@ jboolean Java_org_yuzu_yuzu_1emu_NativeLibrary_isFirmwareAvailable(JNIEnv* env,
|
||||
jobjectArray Java_org_yuzu_yuzu_1emu_NativeLibrary_getPatchesForFile(JNIEnv* env, jobject jobj,
|
||||
jstring jpath,
|
||||
jstring jprogramId) {
|
||||
const auto path = GetJString(env, jpath);
|
||||
const auto path = Common::Android::GetJString(env, jpath);
|
||||
const auto vFile =
|
||||
Core::GetGameFileFromPath(EmulationSession::GetInstance().System().GetFilesystem(), path);
|
||||
if (vFile == nullptr) {
|
||||
@ -875,14 +882,15 @@ jobjectArray Java_org_yuzu_yuzu_1emu_NativeLibrary_getPatchesForFile(JNIEnv* env
|
||||
|
||||
auto patches = pm.GetPatches(update_raw);
|
||||
jobjectArray jpatchArray =
|
||||
env->NewObjectArray(patches.size(), IDCache::GetPatchClass(), nullptr);
|
||||
env->NewObjectArray(patches.size(), Common::Android::GetPatchClass(), nullptr);
|
||||
int i = 0;
|
||||
for (const auto& patch : patches) {
|
||||
jobject jpatch = env->NewObject(
|
||||
IDCache::GetPatchClass(), IDCache::GetPatchConstructor(), patch.enabled,
|
||||
ToJString(env, patch.name), ToJString(env, patch.version),
|
||||
static_cast<jint>(patch.type), ToJString(env, std::to_string(patch.program_id)),
|
||||
ToJString(env, std::to_string(patch.title_id)));
|
||||
Common::Android::GetPatchClass(), Common::Android::GetPatchConstructor(), patch.enabled,
|
||||
Common::Android::ToJString(env, patch.name),
|
||||
Common::Android::ToJString(env, patch.version), static_cast<jint>(patch.type),
|
||||
Common::Android::ToJString(env, std::to_string(patch.program_id)),
|
||||
Common::Android::ToJString(env, std::to_string(patch.title_id)));
|
||||
env->SetObjectArrayElement(jpatchArray, i, jpatch);
|
||||
++i;
|
||||
}
|
||||
@ -906,7 +914,7 @@ void Java_org_yuzu_yuzu_1emu_NativeLibrary_removeMod(JNIEnv* env, jobject jobj,
|
||||
jstring jname) {
|
||||
auto program_id = EmulationSession::GetProgramId(env, jprogramId);
|
||||
ContentManager::RemoveMod(EmulationSession::GetInstance().System().GetFileSystemController(),
|
||||
program_id, GetJString(env, jname));
|
||||
program_id, Common::Android::GetJString(env, jname));
|
||||
}
|
||||
|
||||
jobjectArray Java_org_yuzu_yuzu_1emu_NativeLibrary_verifyInstalledContents(JNIEnv* env,
|
||||
@ -917,17 +925,18 @@ jobjectArray Java_org_yuzu_yuzu_1emu_NativeLibrary_verifyInstalledContents(JNIEn
|
||||
jlambdaClass, "invoke", "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;");
|
||||
const auto callback = [env, jcallback, jlambdaInvokeMethod](size_t max, size_t progress) {
|
||||
auto jwasCancelled = env->CallObjectMethod(jcallback, jlambdaInvokeMethod,
|
||||
ToJDouble(env, max), ToJDouble(env, progress));
|
||||
return GetJBoolean(env, jwasCancelled);
|
||||
Common::Android::ToJDouble(env, max),
|
||||
Common::Android::ToJDouble(env, progress));
|
||||
return Common::Android::GetJBoolean(env, jwasCancelled);
|
||||
};
|
||||
|
||||
auto& session = EmulationSession::GetInstance();
|
||||
std::vector<std::string> result = ContentManager::VerifyInstalledContents(
|
||||
session.System(), *session.GetContentProvider(), callback);
|
||||
jobjectArray jresult =
|
||||
env->NewObjectArray(result.size(), IDCache::GetStringClass(), ToJString(env, ""));
|
||||
jobjectArray jresult = env->NewObjectArray(result.size(), Common::Android::GetStringClass(),
|
||||
Common::Android::ToJString(env, ""));
|
||||
for (size_t i = 0; i < result.size(); ++i) {
|
||||
env->SetObjectArrayElement(jresult, i, ToJString(env, result[i]));
|
||||
env->SetObjectArrayElement(jresult, i, Common::Android::ToJString(env, result[i]));
|
||||
}
|
||||
return jresult;
|
||||
}
|
||||
@ -939,19 +948,20 @@ jint Java_org_yuzu_yuzu_1emu_NativeLibrary_verifyGameContents(JNIEnv* env, jobje
|
||||
jlambdaClass, "invoke", "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;");
|
||||
const auto callback = [env, jcallback, jlambdaInvokeMethod](size_t max, size_t progress) {
|
||||
auto jwasCancelled = env->CallObjectMethod(jcallback, jlambdaInvokeMethod,
|
||||
ToJDouble(env, max), ToJDouble(env, progress));
|
||||
return GetJBoolean(env, jwasCancelled);
|
||||
Common::Android::ToJDouble(env, max),
|
||||
Common::Android::ToJDouble(env, progress));
|
||||
return Common::Android::GetJBoolean(env, jwasCancelled);
|
||||
};
|
||||
auto& session = EmulationSession::GetInstance();
|
||||
return static_cast<jint>(
|
||||
ContentManager::VerifyGameContents(session.System(), GetJString(env, jpath), callback));
|
||||
return static_cast<jint>(ContentManager::VerifyGameContents(
|
||||
session.System(), Common::Android::GetJString(env, jpath), callback));
|
||||
}
|
||||
|
||||
jstring Java_org_yuzu_yuzu_1emu_NativeLibrary_getSavePath(JNIEnv* env, jobject jobj,
|
||||
jstring jprogramId) {
|
||||
auto program_id = EmulationSession::GetProgramId(env, jprogramId);
|
||||
if (program_id == 0) {
|
||||
return ToJString(env, "");
|
||||
return Common::Android::ToJString(env, "");
|
||||
}
|
||||
|
||||
auto& system = EmulationSession::GetInstance().System();
|
||||
@ -968,7 +978,7 @@ jstring Java_org_yuzu_yuzu_1emu_NativeLibrary_getSavePath(JNIEnv* env, jobject j
|
||||
const auto user_save_data_path = FileSys::SaveDataFactory::GetFullPath(
|
||||
{}, vfsNandDir, FileSys::SaveDataSpaceId::NandUser, FileSys::SaveDataType::SaveData,
|
||||
program_id, user_id->AsU128(), 0);
|
||||
return ToJString(env, user_save_data_path);
|
||||
return Common::Android::ToJString(env, user_save_data_path);
|
||||
}
|
||||
|
||||
jstring Java_org_yuzu_yuzu_1emu_NativeLibrary_getDefaultProfileSaveDataRoot(JNIEnv* env,
|
||||
@ -981,12 +991,13 @@ jstring Java_org_yuzu_yuzu_1emu_NativeLibrary_getDefaultProfileSaveDataRoot(JNIE
|
||||
|
||||
const auto user_save_data_root =
|
||||
FileSys::SaveDataFactory::GetUserGameSaveDataRoot(user_id->AsU128(), jfuture);
|
||||
return ToJString(env, user_save_data_root);
|
||||
return Common::Android::ToJString(env, user_save_data_root);
|
||||
}
|
||||
|
||||
void Java_org_yuzu_yuzu_1emu_NativeLibrary_addFileToFilesystemProvider(JNIEnv* env, jobject jobj,
|
||||
jstring jpath) {
|
||||
EmulationSession::GetInstance().ConfigureFilesystemProvider(GetJString(env, jpath));
|
||||
EmulationSession::GetInstance().ConfigureFilesystemProvider(
|
||||
Common::Android::GetJString(env, jpath));
|
||||
}
|
||||
|
||||
void Java_org_yuzu_yuzu_1emu_NativeLibrary_clearFilesystemProvider(JNIEnv* env, jobject jobj) {
|
||||
|
@ -2,13 +2,13 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <android/native_window_jni.h>
|
||||
#include "common/android/applets/software_keyboard.h"
|
||||
#include "common/detached_tasks.h"
|
||||
#include "core/core.h"
|
||||
#include "core/file_sys/registered_cache.h"
|
||||
#include "core/hle/service/acc/profile_manager.h"
|
||||
#include "core/perf_stats.h"
|
||||
#include "frontend_common/content_manager.h"
|
||||
#include "jni/applets/software_keyboard.h"
|
||||
#include "jni/emu_window/emu_window.h"
|
||||
#include "video_core/rasterizer_interface.h"
|
||||
|
||||
@ -54,7 +54,7 @@ public:
|
||||
void SetDeviceType([[maybe_unused]] int index, int type);
|
||||
void OnGamepadConnectEvent([[maybe_unused]] int index);
|
||||
void OnGamepadDisconnectEvent([[maybe_unused]] int index);
|
||||
SoftwareKeyboard::AndroidKeyboard* SoftwareKeyboard();
|
||||
Common::Android::SoftwareKeyboard::AndroidKeyboard* SoftwareKeyboard();
|
||||
|
||||
static void OnEmulationStarted();
|
||||
|
||||
@ -79,7 +79,7 @@ private:
|
||||
Core::SystemResultStatus m_load_result{Core::SystemResultStatus::ErrorNotInitialized};
|
||||
std::atomic<bool> m_is_running = false;
|
||||
std::atomic<bool> m_is_paused = false;
|
||||
SoftwareKeyboard::AndroidKeyboard* m_software_keyboard{};
|
||||
Common::Android::SoftwareKeyboard::AndroidKeyboard* m_software_keyboard{};
|
||||
std::unique_ptr<FileSys::ManualContentProvider> m_manual_provider;
|
||||
int m_applet_id{1};
|
||||
|
||||
|
@ -8,11 +8,11 @@
|
||||
|
||||
#include "android_config.h"
|
||||
#include "android_settings.h"
|
||||
#include "common/android/android_common.h"
|
||||
#include "common/android/id_cache.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/settings.h"
|
||||
#include "frontend_common/config.h"
|
||||
#include "jni/android_common/android_common.h"
|
||||
#include "jni/id_cache.h"
|
||||
#include "native.h"
|
||||
|
||||
std::unique_ptr<AndroidConfig> global_config;
|
||||
@ -20,7 +20,7 @@ std::unique_ptr<AndroidConfig> per_game_config;
|
||||
|
||||
template <typename T>
|
||||
Settings::Setting<T>* getSetting(JNIEnv* env, jstring jkey) {
|
||||
auto key = GetJString(env, jkey);
|
||||
auto key = Common::Android::GetJString(env, jkey);
|
||||
auto basic_setting = Settings::values.linkage.by_key[key];
|
||||
if (basic_setting != 0) {
|
||||
return static_cast<Settings::Setting<T>*>(basic_setting);
|
||||
@ -55,7 +55,7 @@ void Java_org_yuzu_yuzu_1emu_utils_NativeConfig_initializePerGameConfig(JNIEnv*
|
||||
jstring jprogramId,
|
||||
jstring jfileName) {
|
||||
auto program_id = EmulationSession::GetProgramId(env, jprogramId);
|
||||
auto file_name = GetJString(env, jfileName);
|
||||
auto file_name = Common::Android::GetJString(env, jfileName);
|
||||
const auto config_file_name = program_id == 0 ? file_name : fmt::format("{:016X}", program_id);
|
||||
per_game_config =
|
||||
std::make_unique<AndroidConfig>(config_file_name, Config::ConfigType::PerGameConfig);
|
||||
@ -186,9 +186,9 @@ jstring Java_org_yuzu_yuzu_1emu_utils_NativeConfig_getString(JNIEnv* env, jobjec
|
||||
jboolean needGlobal) {
|
||||
auto setting = getSetting<std::string>(env, jkey);
|
||||
if (setting == nullptr) {
|
||||
return ToJString(env, "");
|
||||
return Common::Android::ToJString(env, "");
|
||||
}
|
||||
return ToJString(env, setting->GetValue(static_cast<bool>(needGlobal)));
|
||||
return Common::Android::ToJString(env, setting->GetValue(static_cast<bool>(needGlobal)));
|
||||
}
|
||||
|
||||
void Java_org_yuzu_yuzu_1emu_utils_NativeConfig_setString(JNIEnv* env, jobject obj, jstring jkey,
|
||||
@ -198,7 +198,7 @@ void Java_org_yuzu_yuzu_1emu_utils_NativeConfig_setString(JNIEnv* env, jobject o
|
||||
return;
|
||||
}
|
||||
|
||||
setting->SetValue(GetJString(env, value));
|
||||
setting->SetValue(Common::Android::GetJString(env, value));
|
||||
}
|
||||
|
||||
jboolean Java_org_yuzu_yuzu_1emu_utils_NativeConfig_getIsRuntimeModifiable(JNIEnv* env, jobject obj,
|
||||
@ -214,13 +214,13 @@ jstring Java_org_yuzu_yuzu_1emu_utils_NativeConfig_getPairedSettingKey(JNIEnv* e
|
||||
jstring jkey) {
|
||||
auto setting = getSetting<std::string>(env, jkey);
|
||||
if (setting == nullptr) {
|
||||
return ToJString(env, "");
|
||||
return Common::Android::ToJString(env, "");
|
||||
}
|
||||
if (setting->PairedSetting() == nullptr) {
|
||||
return ToJString(env, "");
|
||||
return Common::Android::ToJString(env, "");
|
||||
}
|
||||
|
||||
return ToJString(env, setting->PairedSetting()->GetLabel());
|
||||
return Common::Android::ToJString(env, setting->PairedSetting()->GetLabel());
|
||||
}
|
||||
|
||||
jboolean Java_org_yuzu_yuzu_1emu_utils_NativeConfig_getIsSwitchable(JNIEnv* env, jobject obj,
|
||||
@ -262,21 +262,21 @@ jstring Java_org_yuzu_yuzu_1emu_utils_NativeConfig_getDefaultToString(JNIEnv* en
|
||||
jstring jkey) {
|
||||
auto setting = getSetting<std::string>(env, jkey);
|
||||
if (setting != nullptr) {
|
||||
return ToJString(env, setting->DefaultToString());
|
||||
return Common::Android::ToJString(env, setting->DefaultToString());
|
||||
}
|
||||
return ToJString(env, "");
|
||||
return Common::Android::ToJString(env, "");
|
||||
}
|
||||
|
||||
jobjectArray Java_org_yuzu_yuzu_1emu_utils_NativeConfig_getGameDirs(JNIEnv* env, jobject obj) {
|
||||
jclass gameDirClass = IDCache::GetGameDirClass();
|
||||
jmethodID gameDirConstructor = IDCache::GetGameDirConstructor();
|
||||
jclass gameDirClass = Common::Android::GetGameDirClass();
|
||||
jmethodID gameDirConstructor = Common::Android::GetGameDirConstructor();
|
||||
jobjectArray jgameDirArray =
|
||||
env->NewObjectArray(AndroidSettings::values.game_dirs.size(), gameDirClass, nullptr);
|
||||
for (size_t i = 0; i < AndroidSettings::values.game_dirs.size(); ++i) {
|
||||
jobject jgameDir =
|
||||
env->NewObject(gameDirClass, gameDirConstructor,
|
||||
ToJString(env, AndroidSettings::values.game_dirs[i].path),
|
||||
static_cast<jboolean>(AndroidSettings::values.game_dirs[i].deep_scan));
|
||||
jobject jgameDir = env->NewObject(
|
||||
gameDirClass, gameDirConstructor,
|
||||
Common::Android::ToJString(env, AndroidSettings::values.game_dirs[i].path),
|
||||
static_cast<jboolean>(AndroidSettings::values.game_dirs[i].deep_scan));
|
||||
env->SetObjectArrayElement(jgameDirArray, i, jgameDir);
|
||||
}
|
||||
return jgameDirArray;
|
||||
@ -292,14 +292,14 @@ void Java_org_yuzu_yuzu_1emu_utils_NativeConfig_setGameDirs(JNIEnv* env, jobject
|
||||
}
|
||||
|
||||
jobject dir = env->GetObjectArrayElement(gameDirs, 0);
|
||||
jclass gameDirClass = IDCache::GetGameDirClass();
|
||||
jclass gameDirClass = Common::Android::GetGameDirClass();
|
||||
jfieldID uriStringField = env->GetFieldID(gameDirClass, "uriString", "Ljava/lang/String;");
|
||||
jfieldID deepScanBooleanField = env->GetFieldID(gameDirClass, "deepScan", "Z");
|
||||
for (int i = 0; i < size; ++i) {
|
||||
dir = env->GetObjectArrayElement(gameDirs, i);
|
||||
jstring juriString = static_cast<jstring>(env->GetObjectField(dir, uriStringField));
|
||||
jboolean jdeepScanBoolean = env->GetBooleanField(dir, deepScanBooleanField);
|
||||
std::string uriString = GetJString(env, juriString);
|
||||
std::string uriString = Common::Android::GetJString(env, juriString);
|
||||
AndroidSettings::values.game_dirs.push_back(
|
||||
AndroidSettings::GameDir{uriString, static_cast<bool>(jdeepScanBoolean)});
|
||||
}
|
||||
@ -307,13 +307,13 @@ void Java_org_yuzu_yuzu_1emu_utils_NativeConfig_setGameDirs(JNIEnv* env, jobject
|
||||
|
||||
void Java_org_yuzu_yuzu_1emu_utils_NativeConfig_addGameDir(JNIEnv* env, jobject obj,
|
||||
jobject gameDir) {
|
||||
jclass gameDirClass = IDCache::GetGameDirClass();
|
||||
jclass gameDirClass = Common::Android::GetGameDirClass();
|
||||
jfieldID uriStringField = env->GetFieldID(gameDirClass, "uriString", "Ljava/lang/String;");
|
||||
jfieldID deepScanBooleanField = env->GetFieldID(gameDirClass, "deepScan", "Z");
|
||||
|
||||
jstring juriString = static_cast<jstring>(env->GetObjectField(gameDir, uriStringField));
|
||||
jboolean jdeepScanBoolean = env->GetBooleanField(gameDir, deepScanBooleanField);
|
||||
std::string uriString = GetJString(env, juriString);
|
||||
std::string uriString = Common::Android::GetJString(env, juriString);
|
||||
AndroidSettings::values.game_dirs.push_back(
|
||||
AndroidSettings::GameDir{uriString, static_cast<bool>(jdeepScanBoolean)});
|
||||
}
|
||||
@ -323,9 +323,11 @@ jobjectArray Java_org_yuzu_yuzu_1emu_utils_NativeConfig_getDisabledAddons(JNIEnv
|
||||
auto program_id = EmulationSession::GetProgramId(env, jprogramId);
|
||||
auto& disabledAddons = Settings::values.disabled_addons[program_id];
|
||||
jobjectArray jdisabledAddonsArray =
|
||||
env->NewObjectArray(disabledAddons.size(), IDCache::GetStringClass(), ToJString(env, ""));
|
||||
env->NewObjectArray(disabledAddons.size(), Common::Android::GetStringClass(),
|
||||
Common::Android::ToJString(env, ""));
|
||||
for (size_t i = 0; i < disabledAddons.size(); ++i) {
|
||||
env->SetObjectArrayElement(jdisabledAddonsArray, i, ToJString(env, disabledAddons[i]));
|
||||
env->SetObjectArrayElement(jdisabledAddonsArray, i,
|
||||
Common::Android::ToJString(env, disabledAddons[i]));
|
||||
}
|
||||
return jdisabledAddonsArray;
|
||||
}
|
||||
@ -339,7 +341,7 @@ void Java_org_yuzu_yuzu_1emu_utils_NativeConfig_setDisabledAddons(JNIEnv* env, j
|
||||
const int size = env->GetArrayLength(jdisabledAddons);
|
||||
for (int i = 0; i < size; ++i) {
|
||||
auto jaddon = static_cast<jstring>(env->GetObjectArrayElement(jdisabledAddons, i));
|
||||
disabled_addons.push_back(GetJString(env, jaddon));
|
||||
disabled_addons.push_back(Common::Android::GetJString(env, jaddon));
|
||||
}
|
||||
Settings::values.disabled_addons[program_id] = disabled_addons;
|
||||
}
|
||||
@ -348,26 +350,27 @@ jobjectArray Java_org_yuzu_yuzu_1emu_utils_NativeConfig_getOverlayControlData(JN
|
||||
jobject obj) {
|
||||
jobjectArray joverlayControlDataArray =
|
||||
env->NewObjectArray(AndroidSettings::values.overlay_control_data.size(),
|
||||
IDCache::GetOverlayControlDataClass(), nullptr);
|
||||
Common::Android::GetOverlayControlDataClass(), nullptr);
|
||||
for (size_t i = 0; i < AndroidSettings::values.overlay_control_data.size(); ++i) {
|
||||
const auto& control_data = AndroidSettings::values.overlay_control_data[i];
|
||||
jobject jlandscapePosition =
|
||||
env->NewObject(IDCache::GetPairClass(), IDCache::GetPairConstructor(),
|
||||
ToJDouble(env, control_data.landscape_position.first),
|
||||
ToJDouble(env, control_data.landscape_position.second));
|
||||
env->NewObject(Common::Android::GetPairClass(), Common::Android::GetPairConstructor(),
|
||||
Common::Android::ToJDouble(env, control_data.landscape_position.first),
|
||||
Common::Android::ToJDouble(env, control_data.landscape_position.second));
|
||||
jobject jportraitPosition =
|
||||
env->NewObject(IDCache::GetPairClass(), IDCache::GetPairConstructor(),
|
||||
ToJDouble(env, control_data.portrait_position.first),
|
||||
ToJDouble(env, control_data.portrait_position.second));
|
||||
env->NewObject(Common::Android::GetPairClass(), Common::Android::GetPairConstructor(),
|
||||
Common::Android::ToJDouble(env, control_data.portrait_position.first),
|
||||
Common::Android::ToJDouble(env, control_data.portrait_position.second));
|
||||
jobject jfoldablePosition =
|
||||
env->NewObject(IDCache::GetPairClass(), IDCache::GetPairConstructor(),
|
||||
ToJDouble(env, control_data.foldable_position.first),
|
||||
ToJDouble(env, control_data.foldable_position.second));
|
||||
env->NewObject(Common::Android::GetPairClass(), Common::Android::GetPairConstructor(),
|
||||
Common::Android::ToJDouble(env, control_data.foldable_position.first),
|
||||
Common::Android::ToJDouble(env, control_data.foldable_position.second));
|
||||
|
||||
jobject jcontrolData = env->NewObject(
|
||||
IDCache::GetOverlayControlDataClass(), IDCache::GetOverlayControlDataConstructor(),
|
||||
ToJString(env, control_data.id), control_data.enabled, jlandscapePosition,
|
||||
jportraitPosition, jfoldablePosition);
|
||||
jobject jcontrolData =
|
||||
env->NewObject(Common::Android::GetOverlayControlDataClass(),
|
||||
Common::Android::GetOverlayControlDataConstructor(),
|
||||
Common::Android::ToJString(env, control_data.id), control_data.enabled,
|
||||
jlandscapePosition, jportraitPosition, jfoldablePosition);
|
||||
env->SetObjectArrayElement(joverlayControlDataArray, i, jcontrolData);
|
||||
}
|
||||
return joverlayControlDataArray;
|
||||
@ -384,33 +387,41 @@ void Java_org_yuzu_yuzu_1emu_utils_NativeConfig_setOverlayControlData(
|
||||
|
||||
for (int i = 0; i < size; ++i) {
|
||||
jobject joverlayControlData = env->GetObjectArrayElement(joverlayControlDataArray, i);
|
||||
jstring jidString = static_cast<jstring>(
|
||||
env->GetObjectField(joverlayControlData, IDCache::GetOverlayControlDataIdField()));
|
||||
jstring jidString = static_cast<jstring>(env->GetObjectField(
|
||||
joverlayControlData, Common::Android::GetOverlayControlDataIdField()));
|
||||
bool enabled = static_cast<bool>(env->GetBooleanField(
|
||||
joverlayControlData, IDCache::GetOverlayControlDataEnabledField()));
|
||||
joverlayControlData, Common::Android::GetOverlayControlDataEnabledField()));
|
||||
|
||||
jobject jlandscapePosition = env->GetObjectField(
|
||||
joverlayControlData, IDCache::GetOverlayControlDataLandscapePositionField());
|
||||
joverlayControlData, Common::Android::GetOverlayControlDataLandscapePositionField());
|
||||
std::pair<double, double> landscape_position = std::make_pair(
|
||||
GetJDouble(env, env->GetObjectField(jlandscapePosition, IDCache::GetPairFirstField())),
|
||||
GetJDouble(env,
|
||||
env->GetObjectField(jlandscapePosition, IDCache::GetPairSecondField())));
|
||||
Common::Android::GetJDouble(
|
||||
env, env->GetObjectField(jlandscapePosition, Common::Android::GetPairFirstField())),
|
||||
Common::Android::GetJDouble(
|
||||
env,
|
||||
env->GetObjectField(jlandscapePosition, Common::Android::GetPairSecondField())));
|
||||
|
||||
jobject jportraitPosition = env->GetObjectField(
|
||||
joverlayControlData, IDCache::GetOverlayControlDataPortraitPositionField());
|
||||
joverlayControlData, Common::Android::GetOverlayControlDataPortraitPositionField());
|
||||
std::pair<double, double> portrait_position = std::make_pair(
|
||||
GetJDouble(env, env->GetObjectField(jportraitPosition, IDCache::GetPairFirstField())),
|
||||
GetJDouble(env, env->GetObjectField(jportraitPosition, IDCache::GetPairSecondField())));
|
||||
Common::Android::GetJDouble(
|
||||
env, env->GetObjectField(jportraitPosition, Common::Android::GetPairFirstField())),
|
||||
Common::Android::GetJDouble(
|
||||
env,
|
||||
env->GetObjectField(jportraitPosition, Common::Android::GetPairSecondField())));
|
||||
|
||||
jobject jfoldablePosition = env->GetObjectField(
|
||||
joverlayControlData, IDCache::GetOverlayControlDataFoldablePositionField());
|
||||
joverlayControlData, Common::Android::GetOverlayControlDataFoldablePositionField());
|
||||
std::pair<double, double> foldable_position = std::make_pair(
|
||||
GetJDouble(env, env->GetObjectField(jfoldablePosition, IDCache::GetPairFirstField())),
|
||||
GetJDouble(env, env->GetObjectField(jfoldablePosition, IDCache::GetPairSecondField())));
|
||||
Common::Android::GetJDouble(
|
||||
env, env->GetObjectField(jfoldablePosition, Common::Android::GetPairFirstField())),
|
||||
Common::Android::GetJDouble(
|
||||
env,
|
||||
env->GetObjectField(jfoldablePosition, Common::Android::GetPairSecondField())));
|
||||
|
||||
AndroidSettings::values.overlay_control_data.push_back(AndroidSettings::OverlayControlData{
|
||||
GetJString(env, jidString), enabled, landscape_position, portrait_position,
|
||||
foldable_position});
|
||||
Common::Android::GetJString(env, jidString), enabled, landscape_position,
|
||||
portrait_position, foldable_position});
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,31 +1,30 @@
|
||||
// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <common/android/android_common.h>
|
||||
#include <common/logging/log.h>
|
||||
#include <jni.h>
|
||||
|
||||
#include "android_common/android_common.h"
|
||||
|
||||
extern "C" {
|
||||
|
||||
void Java_org_yuzu_yuzu_1emu_utils_Log_debug(JNIEnv* env, jobject obj, jstring jmessage) {
|
||||
LOG_DEBUG(Frontend, "{}", GetJString(env, jmessage));
|
||||
LOG_DEBUG(Frontend, "{}", Common::Android::GetJString(env, jmessage));
|
||||
}
|
||||
|
||||
void Java_org_yuzu_yuzu_1emu_utils_Log_warning(JNIEnv* env, jobject obj, jstring jmessage) {
|
||||
LOG_WARNING(Frontend, "{}", GetJString(env, jmessage));
|
||||
LOG_WARNING(Frontend, "{}", Common::Android::GetJString(env, jmessage));
|
||||
}
|
||||
|
||||
void Java_org_yuzu_yuzu_1emu_utils_Log_info(JNIEnv* env, jobject obj, jstring jmessage) {
|
||||
LOG_INFO(Frontend, "{}", GetJString(env, jmessage));
|
||||
LOG_INFO(Frontend, "{}", Common::Android::GetJString(env, jmessage));
|
||||
}
|
||||
|
||||
void Java_org_yuzu_yuzu_1emu_utils_Log_error(JNIEnv* env, jobject obj, jstring jmessage) {
|
||||
LOG_ERROR(Frontend, "{}", GetJString(env, jmessage));
|
||||
LOG_ERROR(Frontend, "{}", Common::Android::GetJString(env, jmessage));
|
||||
}
|
||||
|
||||
void Java_org_yuzu_yuzu_1emu_utils_Log_critical(JNIEnv* env, jobject obj, jstring jmessage) {
|
||||
LOG_CRITICAL(Frontend, "{}", GetJString(env, jmessage));
|
||||
LOG_CRITICAL(Frontend, "{}", Common::Android::GetJString(env, jmessage));
|
||||
}
|
||||
|
||||
} // extern "C"
|
||||
|
@ -107,6 +107,8 @@ add_library(common STATIC
|
||||
quaternion.h
|
||||
range_map.h
|
||||
range_mutex.h
|
||||
range_sets.h
|
||||
range_sets.inc
|
||||
reader_writer_queue.h
|
||||
ring_buffer.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/scm_rev.cpp
|
||||
@ -121,6 +123,7 @@ add_library(common STATIC
|
||||
settings_input.cpp
|
||||
settings_input.h
|
||||
settings_setting.h
|
||||
slot_vector.h
|
||||
socket_types.h
|
||||
spin_lock.cpp
|
||||
spin_lock.h
|
||||
@ -179,9 +182,15 @@ endif()
|
||||
|
||||
if(ANDROID)
|
||||
target_sources(common
|
||||
PRIVATE
|
||||
PUBLIC
|
||||
fs/fs_android.cpp
|
||||
fs/fs_android.h
|
||||
android/android_common.cpp
|
||||
android/android_common.h
|
||||
android/id_cache.cpp
|
||||
android/id_cache.h
|
||||
android/applets/software_keyboard.cpp
|
||||
android/applets/software_keyboard.h
|
||||
)
|
||||
endif()
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include "jni/android_common/android_common.h"
|
||||
#include "android_common.h"
|
||||
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
@ -9,7 +9,9 @@
|
||||
#include <jni.h>
|
||||
|
||||
#include "common/string_util.h"
|
||||
#include "jni/id_cache.h"
|
||||
#include "id_cache.h"
|
||||
|
||||
namespace Common::Android {
|
||||
|
||||
std::string GetJString(JNIEnv* env, jstring jstr) {
|
||||
if (!jstr) {
|
||||
@ -18,7 +20,8 @@ std::string GetJString(JNIEnv* env, jstring jstr) {
|
||||
|
||||
const jchar* jchars = env->GetStringChars(jstr, nullptr);
|
||||
const jsize length = env->GetStringLength(jstr);
|
||||
const std::u16string_view string_view(reinterpret_cast<const char16_t*>(jchars), length);
|
||||
const std::u16string_view string_view(reinterpret_cast<const char16_t*>(jchars),
|
||||
static_cast<u32>(length));
|
||||
const std::string converted_string = Common::UTF16ToUTF8(string_view);
|
||||
env->ReleaseStringChars(jstr, jchars);
|
||||
|
||||
@ -36,25 +39,27 @@ jstring ToJString(JNIEnv* env, std::u16string_view str) {
|
||||
}
|
||||
|
||||
double GetJDouble(JNIEnv* env, jobject jdouble) {
|
||||
return env->GetDoubleField(jdouble, IDCache::GetDoubleValueField());
|
||||
return env->GetDoubleField(jdouble, GetDoubleValueField());
|
||||
}
|
||||
|
||||
jobject ToJDouble(JNIEnv* env, double value) {
|
||||
return env->NewObject(IDCache::GetDoubleClass(), IDCache::GetDoubleConstructor(), value);
|
||||
return env->NewObject(GetDoubleClass(), GetDoubleConstructor(), value);
|
||||
}
|
||||
|
||||
s32 GetJInteger(JNIEnv* env, jobject jinteger) {
|
||||
return env->GetIntField(jinteger, IDCache::GetIntegerValueField());
|
||||
return env->GetIntField(jinteger, GetIntegerValueField());
|
||||
}
|
||||
|
||||
jobject ToJInteger(JNIEnv* env, s32 value) {
|
||||
return env->NewObject(IDCache::GetIntegerClass(), IDCache::GetIntegerConstructor(), value);
|
||||
return env->NewObject(GetIntegerClass(), GetIntegerConstructor(), value);
|
||||
}
|
||||
|
||||
bool GetJBoolean(JNIEnv* env, jobject jboolean) {
|
||||
return env->GetBooleanField(jboolean, IDCache::GetBooleanValueField());
|
||||
return env->GetBooleanField(jboolean, GetBooleanValueField());
|
||||
}
|
||||
|
||||
jobject ToJBoolean(JNIEnv* env, bool value) {
|
||||
return env->NewObject(IDCache::GetBooleanClass(), IDCache::GetBooleanConstructor(), value);
|
||||
return env->NewObject(GetBooleanClass(), GetBooleanConstructor(), value);
|
||||
}
|
||||
|
||||
} // namespace Common::Android
|
@ -8,6 +8,8 @@
|
||||
#include <jni.h>
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Common::Android {
|
||||
|
||||
std::string GetJString(JNIEnv* env, jstring jstr);
|
||||
jstring ToJString(JNIEnv* env, std::string_view str);
|
||||
jstring ToJString(JNIEnv* env, std::u16string_view str);
|
||||
@ -20,3 +22,5 @@ jobject ToJInteger(JNIEnv* env, s32 value);
|
||||
|
||||
bool GetJBoolean(JNIEnv* env, jobject jboolean);
|
||||
jobject ToJBoolean(JNIEnv* env, bool value);
|
||||
|
||||
} // namespace Common::Android
|
@ -6,12 +6,12 @@
|
||||
|
||||
#include <jni.h>
|
||||
|
||||
#include "common/android/android_common.h"
|
||||
#include "common/android/applets/software_keyboard.h"
|
||||
#include "common/android/id_cache.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/string_util.h"
|
||||
#include "core/core.h"
|
||||
#include "jni/android_common/android_common.h"
|
||||
#include "jni/applets/software_keyboard.h"
|
||||
#include "jni/id_cache.h"
|
||||
|
||||
static jclass s_software_keyboard_class;
|
||||
static jclass s_keyboard_config_class;
|
||||
@ -19,10 +19,10 @@ static jclass s_keyboard_data_class;
|
||||
static jmethodID s_swkbd_execute_normal;
|
||||
static jmethodID s_swkbd_execute_inline;
|
||||
|
||||
namespace SoftwareKeyboard {
|
||||
namespace Common::Android::SoftwareKeyboard {
|
||||
|
||||
static jobject ToJKeyboardParams(const Core::Frontend::KeyboardInitializeParameters& config) {
|
||||
JNIEnv* env = IDCache::GetEnvForThread();
|
||||
JNIEnv* env = GetEnvForThread();
|
||||
jobject object = env->AllocObject(s_keyboard_config_class);
|
||||
|
||||
env->SetObjectField(object,
|
||||
@ -78,7 +78,7 @@ static jobject ToJKeyboardParams(const Core::Frontend::KeyboardInitializeParamet
|
||||
}
|
||||
|
||||
AndroidKeyboard::ResultData AndroidKeyboard::ResultData::CreateFromFrontend(jobject object) {
|
||||
JNIEnv* env = IDCache::GetEnvForThread();
|
||||
JNIEnv* env = GetEnvForThread();
|
||||
const jstring string = reinterpret_cast<jstring>(env->GetObjectField(
|
||||
object, env->GetFieldID(s_keyboard_data_class, "text", "Ljava/lang/String;")));
|
||||
return ResultData{GetJString(env, string),
|
||||
@ -141,7 +141,7 @@ void AndroidKeyboard::ShowNormalKeyboard() const {
|
||||
|
||||
// Pivot to a new thread, as we cannot call GetEnvForThread() from a Fiber.
|
||||
std::thread([&] {
|
||||
data = ResultData::CreateFromFrontend(IDCache::GetEnvForThread()->CallStaticObjectMethod(
|
||||
data = ResultData::CreateFromFrontend(GetEnvForThread()->CallStaticObjectMethod(
|
||||
s_software_keyboard_class, s_swkbd_execute_normal, ToJKeyboardParams(parameters)));
|
||||
}).join();
|
||||
|
||||
@ -183,8 +183,8 @@ void AndroidKeyboard::ShowInlineKeyboard(
|
||||
// Pivot to a new thread, as we cannot call GetEnvForThread() from a Fiber.
|
||||
m_is_inline_active = true;
|
||||
std::thread([&] {
|
||||
IDCache::GetEnvForThread()->CallStaticVoidMethod(
|
||||
s_software_keyboard_class, s_swkbd_execute_inline, ToJKeyboardParams(parameters));
|
||||
GetEnvForThread()->CallStaticVoidMethod(s_software_keyboard_class, s_swkbd_execute_inline,
|
||||
ToJKeyboardParams(parameters));
|
||||
}).join();
|
||||
}
|
||||
|
||||
@ -220,7 +220,7 @@ void AndroidKeyboard::SubmitInlineKeyboardText(std::u16string submitted_text) {
|
||||
m_current_text += submitted_text;
|
||||
|
||||
submit_inline_callback(Service::AM::Frontend::SwkbdReplyType::ChangedString, m_current_text,
|
||||
m_current_text.size());
|
||||
static_cast<int>(m_current_text.size()));
|
||||
}
|
||||
|
||||
void AndroidKeyboard::SubmitInlineKeyboardInput(int key_code) {
|
||||
@ -242,7 +242,7 @@ void AndroidKeyboard::SubmitInlineKeyboardInput(int key_code) {
|
||||
case KEYCODE_DEL:
|
||||
m_current_text.pop_back();
|
||||
submit_inline_callback(Service::AM::Frontend::SwkbdReplyType::ChangedString, m_current_text,
|
||||
m_current_text.size());
|
||||
static_cast<int>(m_current_text.size()));
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -274,4 +274,4 @@ void CleanupJNI(JNIEnv* env) {
|
||||
env->DeleteGlobalRef(s_keyboard_data_class);
|
||||
}
|
||||
|
||||
} // namespace SoftwareKeyboard
|
||||
} // namespace Common::Android::SoftwareKeyboard
|
@ -7,7 +7,7 @@
|
||||
|
||||
#include "core/frontend/applets/software_keyboard.h"
|
||||
|
||||
namespace SoftwareKeyboard {
|
||||
namespace Common::Android::SoftwareKeyboard {
|
||||
|
||||
class AndroidKeyboard final : public Core::Frontend::SoftwareKeyboardApplet {
|
||||
public:
|
||||
@ -66,7 +66,7 @@ void InitJNI(JNIEnv* env);
|
||||
// Should be called in JNI_Unload
|
||||
void CleanupJNI(JNIEnv* env);
|
||||
|
||||
} // namespace SoftwareKeyboard
|
||||
} // namespace Common::Android::SoftwareKeyboard
|
||||
|
||||
// Native function calls
|
||||
extern "C" {
|
@@ -3,10 +3,10 @@

#include <jni.h>

#include "applets/software_keyboard.h"
#include "common/assert.h"
#include "common/fs/fs_android.h"
#include "jni/applets/software_keyboard.h"
#include "jni/id_cache.h"
#include "id_cache.h"
#include "video_core/rasterizer_interface.h"

static JavaVM* s_java_vm;
@@ -67,7 +67,7 @@ static jfieldID s_boolean_value_field;

static constexpr jint JNI_VERSION = JNI_VERSION_1_6;

namespace IDCache {
namespace Common::Android {

JNIEnv* GetEnvForThread() {
thread_local static struct OwnedEnv {
@@ -276,8 +276,6 @@ jfieldID GetBooleanValueField() {
return s_boolean_value_field;
}

} // namespace IDCache

#ifdef __cplusplus
extern "C" {
#endif
@@ -393,7 +391,7 @@ jint JNI_OnLoad(JavaVM* vm, void* reserved) {
Common::FS::Android::RegisterCallbacks(env, s_native_library_class);

// Initialize applets
SoftwareKeyboard::InitJNI(env);
Common::Android::SoftwareKeyboard::InitJNI(env);

return JNI_VERSION;
}
@@ -426,3 +424,5 @@ void JNI_OnUnload(JavaVM* vm, void* reserved) {
#ifdef __cplusplus
}
#endif

} // namespace Common::Android
@@ -3,20 +3,40 @@

#pragma once

#include <future>
#include <jni.h>

#include "video_core/rasterizer_interface.h"

namespace IDCache {
namespace Common::Android {

JNIEnv* GetEnvForThread();

/**
 * Starts a new thread to run JNI. Intended to be used when you must run JNI from a fiber.
 * @tparam T Typename of the return value for the work param
 * @param work Lambda that runs JNI code. This function will take care of attaching this thread to
 * the JVM
 * @return The result from the work lambda param
 */
template <typename T = void>
T RunJNIOnFiber(const std::function<T(JNIEnv*)>& work) {
std::future<T> j_result = std::async(std::launch::async, [&] {
auto env = GetEnvForThread();
return work(env);
});
return j_result.get();
}

jclass GetNativeLibraryClass();

jclass GetDiskCacheProgressClass();
jclass GetDiskCacheLoadCallbackStageClass();
jclass GetGameDirClass();
jmethodID GetGameDirConstructor();
jmethodID GetExitEmulationActivity();
jmethodID GetDiskCacheLoadProgress();

jmethodID GetExitEmulationActivity();
jmethodID GetOnEmulationStarted();
jmethodID GetOnEmulationStopped();
jmethodID GetOnProgramChanged();
@@ -65,4 +85,4 @@ jclass GetBooleanClass();
jmethodID GetBooleanConstructor();
jfieldID GetBooleanValueField();

} // namespace IDCache
} // namespace Common::Android
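The doc comment above describes RunJNIOnFiber; a short usage sketch under the same header follows. The first call mirrors the EmuWindow_Android::OnFrameDisplayed site shown earlier in this diff, and the second shows the templated return path. NotifyEmulationStarted and QueryJavaString are illustrative wrapper names, not part of the change.

#include <string>

#include "common/android/android_common.h"
#include "common/android/id_cache.h"

// Fire-and-forget notification: run the JNI call on an attached thread and wait.
void NotifyEmulationStarted() {
    Common::Android::RunJNIOnFiber<void>([&](JNIEnv* env) {
        env->CallStaticVoidMethod(Common::Android::GetNativeLibraryClass(),
                                  Common::Android::GetOnEmulationStarted());
    });
}

// Returning a value: the template parameter is the lambda's return type.
std::string QueryJavaString(jclass clazz, jmethodID method) {
    return Common::Android::RunJNIOnFiber<std::string>([&](JNIEnv* env) {
        const auto jstr = static_cast<jstring>(env->CallStaticObjectMethod(clazz, method));
        return Common::Android::GetJString(env, jstr);
    });
}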
@ -1,63 +1,38 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include "common/android/android_common.h"
|
||||
#include "common/android/id_cache.h"
|
||||
#include "common/assert.h"
|
||||
#include "common/fs/fs_android.h"
|
||||
#include "common/string_util.h"
|
||||
|
||||
namespace Common::FS::Android {
|
||||
|
||||
JNIEnv* GetEnvForThread() {
|
||||
thread_local static struct OwnedEnv {
|
||||
OwnedEnv() {
|
||||
status = g_jvm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6);
|
||||
if (status == JNI_EDETACHED)
|
||||
g_jvm->AttachCurrentThread(&env, nullptr);
|
||||
}
|
||||
|
||||
~OwnedEnv() {
|
||||
if (status == JNI_EDETACHED)
|
||||
g_jvm->DetachCurrentThread();
|
||||
}
|
||||
|
||||
int status;
|
||||
JNIEnv* env = nullptr;
|
||||
} owned;
|
||||
return owned.env;
|
||||
}
|
||||
|
||||
void RegisterCallbacks(JNIEnv* env, jclass clazz) {
|
||||
env->GetJavaVM(&g_jvm);
|
||||
native_library = clazz;
|
||||
|
||||
#define FH(FunctionName, JMethodID, Caller, JMethodName, Signature) \
|
||||
F(JMethodID, JMethodName, Signature)
|
||||
#define FR(FunctionName, ReturnValue, JMethodID, Caller, JMethodName, Signature) \
|
||||
F(JMethodID, JMethodName, Signature)
|
||||
#define FS(FunctionName, ReturnValue, Parameters, JMethodID, JMethodName, Signature) \
|
||||
F(JMethodID, JMethodName, Signature)
|
||||
#define F(JMethodID, JMethodName, Signature) \
|
||||
JMethodID = env->GetStaticMethodID(native_library, JMethodName, Signature);
|
||||
ANDROID_SINGLE_PATH_HELPER_FUNCTIONS(FH)
|
||||
ANDROID_SINGLE_PATH_DETERMINE_FUNCTIONS(FR)
|
||||
ANDROID_STORAGE_FUNCTIONS(FS)
|
||||
#undef F
|
||||
#undef FS
|
||||
#undef FR
|
||||
#undef FH
|
||||
s_get_parent_directory = env->GetStaticMethodID(native_library, "getParentDirectory",
|
||||
"(Ljava/lang/String;)Ljava/lang/String;");
|
||||
s_get_filename = env->GetStaticMethodID(native_library, "getFilename",
|
||||
"(Ljava/lang/String;)Ljava/lang/String;");
|
||||
s_get_size = env->GetStaticMethodID(native_library, "getSize", "(Ljava/lang/String;)J");
|
||||
s_is_directory = env->GetStaticMethodID(native_library, "isDirectory", "(Ljava/lang/String;)Z");
|
||||
s_file_exists = env->GetStaticMethodID(native_library, "exists", "(Ljava/lang/String;)Z");
|
||||
s_open_content_uri = env->GetStaticMethodID(native_library, "openContentUri",
|
||||
"(Ljava/lang/String;Ljava/lang/String;)I");
|
||||
}
|
||||
|
||||
void UnRegisterCallbacks() {
|
||||
#define FH(FunctionName, JMethodID, Caller, JMethodName, Signature) F(JMethodID)
|
||||
#define FR(FunctionName, ReturnValue, JMethodID, Caller, JMethodName, Signature) F(JMethodID)
|
||||
#define FS(FunctionName, ReturnValue, Parameters, JMethodID, JMethodName, Signature) F(JMethodID)
|
||||
#define F(JMethodID) JMethodID = nullptr;
|
||||
ANDROID_SINGLE_PATH_HELPER_FUNCTIONS(FH)
|
||||
ANDROID_SINGLE_PATH_DETERMINE_FUNCTIONS(FR)
|
||||
ANDROID_STORAGE_FUNCTIONS(FS)
|
||||
#undef F
|
||||
#undef FS
|
||||
#undef FR
|
||||
#undef FH
|
||||
s_get_parent_directory = nullptr;
|
||||
s_get_filename = nullptr;
|
||||
|
||||
s_get_size = nullptr;
|
||||
s_is_directory = nullptr;
|
||||
s_file_exists = nullptr;
|
||||
|
||||
s_open_content_uri = nullptr;
|
||||
}
|
||||
|
||||
bool IsContentUri(const std::string& path) {
|
||||
@ -70,7 +45,7 @@ bool IsContentUri(const std::string& path) {
|
||||
}
|
||||
|
||||
int OpenContentUri(const std::string& filepath, OpenMode openmode) {
|
||||
if (open_content_uri == nullptr)
|
||||
if (s_open_content_uri == nullptr)
|
||||
return -1;
|
||||
|
||||
const char* mode = "";
|
||||
@ -82,50 +57,66 @@ int OpenContentUri(const std::string& filepath, OpenMode openmode) {
|
||||
UNIMPLEMENTED();
|
||||
return -1;
|
||||
}
|
||||
auto env = GetEnvForThread();
|
||||
jstring j_filepath = env->NewStringUTF(filepath.c_str());
|
||||
jstring j_mode = env->NewStringUTF(mode);
|
||||
return env->CallStaticIntMethod(native_library, open_content_uri, j_filepath, j_mode);
|
||||
auto env = Common::Android::GetEnvForThread();
|
||||
jstring j_filepath = Common::Android::ToJString(env, filepath);
|
||||
jstring j_mode = Common::Android::ToJString(env, mode);
|
||||
return env->CallStaticIntMethod(native_library, s_open_content_uri, j_filepath, j_mode);
|
||||
}
|
||||
|
||||
#define FR(FunctionName, ReturnValue, JMethodID, Caller, JMethodName, Signature) \
|
||||
F(FunctionName, ReturnValue, JMethodID, Caller)
|
||||
#define F(FunctionName, ReturnValue, JMethodID, Caller) \
|
||||
ReturnValue FunctionName(const std::string& filepath) { \
|
||||
if (JMethodID == nullptr) { \
|
||||
return 0; \
|
||||
} \
|
||||
auto env = GetEnvForThread(); \
|
||||
jstring j_filepath = env->NewStringUTF(filepath.c_str()); \
|
||||
return env->Caller(native_library, JMethodID, j_filepath); \
|
||||
std::uint64_t GetSize(const std::string& filepath) {
|
||||
if (s_get_size == nullptr) {
|
||||
return 0;
|
||||
}
|
||||
ANDROID_SINGLE_PATH_DETERMINE_FUNCTIONS(FR)
|
||||
#undef F
|
||||
#undef FR
|
||||
auto env = Common::Android::GetEnvForThread();
|
||||
return static_cast<u64>(env->CallStaticLongMethod(
|
||||
native_library, s_get_size,
|
||||
Common::Android::ToJString(Common::Android::GetEnvForThread(), filepath)));
|
||||
}
|
||||
|
||||
#define FH(FunctionName, JMethodID, Caller, JMethodName, Signature) \
|
||||
F(FunctionName, JMethodID, Caller)
|
||||
#define F(FunctionName, JMethodID, Caller) \
|
||||
std::string FunctionName(const std::string& filepath) { \
|
||||
if (JMethodID == nullptr) { \
|
||||
return 0; \
|
||||
} \
|
||||
auto env = GetEnvForThread(); \
|
||||
jstring j_filepath = env->NewStringUTF(filepath.c_str()); \
|
||||
jstring j_return = \
|
||||
static_cast<jstring>(env->Caller(native_library, JMethodID, j_filepath)); \
|
||||
if (!j_return) { \
|
||||
return {}; \
|
||||
} \
|
||||
const jchar* jchars = env->GetStringChars(j_return, nullptr); \
|
||||
const jsize length = env->GetStringLength(j_return); \
|
||||
const std::u16string_view string_view(reinterpret_cast<const char16_t*>(jchars), length); \
|
||||
const std::string converted_string = Common::UTF16ToUTF8(string_view); \
|
||||
env->ReleaseStringChars(j_return, jchars); \
|
||||
return converted_string; \
|
||||
bool IsDirectory(const std::string& filepath) {
|
||||
if (s_is_directory == nullptr) {
|
||||
return 0;
|
||||
}
|
||||
ANDROID_SINGLE_PATH_HELPER_FUNCTIONS(FH)
|
||||
#undef F
|
||||
#undef FH
|
||||
auto env = Common::Android::GetEnvForThread();
|
||||
return env->CallStaticBooleanMethod(
|
||||
native_library, s_is_directory,
|
||||
Common::Android::ToJString(Common::Android::GetEnvForThread(), filepath));
|
||||
}
|
||||
|
||||
bool Exists(const std::string& filepath) {
|
||||
if (s_file_exists == nullptr) {
|
||||
return 0;
|
||||
}
|
||||
auto env = Common::Android::GetEnvForThread();
|
||||
return env->CallStaticBooleanMethod(
|
||||
native_library, s_file_exists,
|
||||
Common::Android::ToJString(Common::Android::GetEnvForThread(), filepath));
|
||||
}
|
||||
|
||||
std::string GetParentDirectory(const std::string& filepath) {
|
||||
if (s_get_parent_directory == nullptr) {
|
||||
return {};
|
||||
}
|
||||
auto env = Common::Android::GetEnvForThread();
|
||||
jstring j_return = static_cast<jstring>(env->CallStaticObjectMethod(
|
||||
native_library, s_get_parent_directory, Common::Android::ToJString(env, filepath)));
|
||||
if (!j_return) {
|
||||
return {};
|
||||
}
|
||||
return Common::Android::GetJString(env, j_return);
|
||||
}
|
||||
|
||||
std::string GetFilename(const std::string& filepath) {
|
||||
if (s_get_filename == nullptr) {
|
||||
return {};
|
||||
}
|
||||
auto env = Common::Android::GetEnvForThread();
|
||||
jstring j_return = static_cast<jstring>(env->CallStaticObjectMethod(
|
||||
native_library, s_get_filename, Common::Android::ToJString(env, filepath)));
|
||||
if (!j_return) {
|
||||
return {};
|
||||
}
|
||||
return Common::Android::GetJString(env, j_return);
|
||||
}
|
||||
|
||||
} // namespace Common::FS::Android
|
||||
|
@ -7,38 +7,17 @@
|
||||
#include <vector>
|
||||
#include <jni.h>
|
||||
|
||||
#define ANDROID_STORAGE_FUNCTIONS(V) \
|
||||
V(OpenContentUri, int, (const std::string& filepath, OpenMode openmode), open_content_uri, \
|
||||
"openContentUri", "(Ljava/lang/String;Ljava/lang/String;)I")
|
||||
|
||||
#define ANDROID_SINGLE_PATH_DETERMINE_FUNCTIONS(V) \
|
||||
V(GetSize, std::uint64_t, get_size, CallStaticLongMethod, "getSize", "(Ljava/lang/String;)J") \
|
||||
V(IsDirectory, bool, is_directory, CallStaticBooleanMethod, "isDirectory", \
|
||||
"(Ljava/lang/String;)Z") \
|
||||
V(Exists, bool, file_exists, CallStaticBooleanMethod, "exists", "(Ljava/lang/String;)Z")
|
||||
|
||||
#define ANDROID_SINGLE_PATH_HELPER_FUNCTIONS(V) \
|
||||
V(GetParentDirectory, get_parent_directory, CallStaticObjectMethod, "getParentDirectory", \
|
||||
"(Ljava/lang/String;)Ljava/lang/String;") \
|
||||
V(GetFilename, get_filename, CallStaticObjectMethod, "getFilename", \
|
||||
"(Ljava/lang/String;)Ljava/lang/String;")
|
||||
|
||||
namespace Common::FS::Android {
|
||||
|
||||
static JavaVM* g_jvm = nullptr;
|
||||
static jclass native_library = nullptr;
|
||||
|
||||
#define FH(FunctionName, JMethodID, Caller, JMethodName, Signature) F(JMethodID)
|
||||
#define FR(FunctionName, ReturnValue, JMethodID, Caller, JMethodName, Signature) F(JMethodID)
|
||||
#define FS(FunctionName, ReturnValue, Parameters, JMethodID, JMethodName, Signature) F(JMethodID)
|
||||
#define F(JMethodID) static jmethodID JMethodID = nullptr;
|
||||
ANDROID_SINGLE_PATH_HELPER_FUNCTIONS(FH)
|
||||
ANDROID_SINGLE_PATH_DETERMINE_FUNCTIONS(FR)
|
||||
ANDROID_STORAGE_FUNCTIONS(FS)
|
||||
#undef F
|
||||
#undef FS
|
||||
#undef FR
|
||||
#undef FH
|
||||
static jmethodID s_get_parent_directory;
|
||||
static jmethodID s_get_filename;
|
||||
static jmethodID s_get_size;
|
||||
static jmethodID s_is_directory;
|
||||
static jmethodID s_file_exists;
|
||||
static jmethodID s_open_content_uri;
|
||||
|
||||
enum class OpenMode {
|
||||
Read,
|
||||
@ -57,24 +36,11 @@ void UnRegisterCallbacks();
|
||||
|
||||
bool IsContentUri(const std::string& path);
|
||||
|
||||
#define FS(FunctionName, ReturnValue, Parameters, JMethodID, JMethodName, Signature) \
|
||||
F(FunctionName, Parameters, ReturnValue)
|
||||
#define F(FunctionName, Parameters, ReturnValue) ReturnValue FunctionName Parameters;
|
||||
ANDROID_STORAGE_FUNCTIONS(FS)
|
||||
#undef F
|
||||
#undef FS
|
||||
|
||||
#define FR(FunctionName, ReturnValue, JMethodID, Caller, JMethodName, Signature) \
|
||||
F(FunctionName, ReturnValue)
|
||||
#define F(FunctionName, ReturnValue) ReturnValue FunctionName(const std::string& filepath);
|
||||
ANDROID_SINGLE_PATH_DETERMINE_FUNCTIONS(FR)
|
||||
#undef F
|
||||
#undef FR
|
||||
|
||||
#define FH(FunctionName, JMethodID, Caller, JMethodName, Signature) F(FunctionName)
|
||||
#define F(FunctionName) std::string FunctionName(const std::string& filepath);
|
||||
ANDROID_SINGLE_PATH_HELPER_FUNCTIONS(FH)
|
||||
#undef F
|
||||
#undef FH
|
||||
int OpenContentUri(const std::string& filepath, OpenMode openmode);
|
||||
std::uint64_t GetSize(const std::string& filepath);
|
||||
bool IsDirectory(const std::string& filepath);
|
||||
bool Exists(const std::string& filepath);
|
||||
std::string GetParentDirectory(const std::string& filepath);
|
||||
std::string GetFilename(const std::string& filepath);
|
||||
|
||||
} // namespace Common::FS::Android
|
||||
|
73
src/common/range_sets.h
Normal file
@ -0,0 +1,73 @@
|
||||
// SPDX-FileCopyrightText: 2024 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Common {
|
||||
|
||||
template <typename AddressType>
|
||||
class RangeSet {
|
||||
public:
|
||||
RangeSet();
|
||||
~RangeSet();
|
||||
|
||||
RangeSet(RangeSet const&) = delete;
|
||||
RangeSet& operator=(RangeSet const&) = delete;
|
||||
|
||||
RangeSet(RangeSet&& other);
|
||||
RangeSet& operator=(RangeSet&& other);
|
||||
|
||||
void Add(AddressType base_address, size_t size);
|
||||
void Subtract(AddressType base_address, size_t size);
|
||||
void Clear();
|
||||
bool Empty() const;
|
||||
|
||||
template <typename Func>
|
||||
void ForEach(Func&& func) const;
|
||||
|
||||
template <typename Func>
|
||||
void ForEachInRange(AddressType device_addr, size_t size, Func&& func) const;
|
||||
|
||||
private:
|
||||
struct RangeSetImpl;
|
||||
std::unique_ptr<RangeSetImpl> m_impl;
|
||||
};
|
||||
|
||||
template <typename AddressType>
|
||||
class OverlapRangeSet {
|
||||
public:
|
||||
OverlapRangeSet();
|
||||
~OverlapRangeSet();
|
||||
|
||||
OverlapRangeSet(OverlapRangeSet const&) = delete;
|
||||
OverlapRangeSet& operator=(OverlapRangeSet const&) = delete;
|
||||
|
||||
OverlapRangeSet(OverlapRangeSet&& other);
|
||||
OverlapRangeSet& operator=(OverlapRangeSet&& other);
|
||||
|
||||
void Add(AddressType base_address, size_t size);
|
||||
void Subtract(AddressType base_address, size_t size);
|
||||
|
||||
template <typename Func>
|
||||
void Subtract(AddressType base_address, size_t size, Func&& on_delete);
|
||||
|
||||
void DeleteAll(AddressType base_address, size_t size);
|
||||
void Clear();
|
||||
bool Empty() const;
|
||||
|
||||
template <typename Func>
|
||||
void ForEach(Func&& func) const;
|
||||
|
||||
template <typename Func>
|
||||
void ForEachInRange(AddressType device_addr, size_t size, Func&& func) const;
|
||||
|
||||
private:
|
||||
struct OverlapRangeSetImpl;
|
||||
std::unique_ptr<OverlapRangeSetImpl> m_impl;
|
||||
};
|
||||
|
||||
} // namespace Common
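A minimal usage sketch for the interface above (illustrative only, not part of the new file; assumes "common/range_sets.h" plus "common/range_sets.inc" are included so the template is instantiated, and uses yuzu's u64 alias):

    Common::RangeSet<u64> ranges;
    ranges.Add(0x1000, 0x2000);     // tracks [0x1000, 0x3000)
    ranges.Subtract(0x1800, 0x400); // punch a hole in the middle
    ranges.ForEachInRange(0x0, 0x10000, [](u64 begin, u64 end) {
        // visited pieces are clamped to the queried window
    });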
|
304
src/common/range_sets.inc
Normal file
@ -0,0 +1,304 @@
|
||||
// SPDX-FileCopyrightText: 2024 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <limits>
|
||||
#include <utility>
|
||||
|
||||
#include <boost/icl/interval.hpp>
|
||||
#include <boost/icl/interval_base_set.hpp>
|
||||
#include <boost/icl/interval_map.hpp>
|
||||
#include <boost/icl/interval_set.hpp>
|
||||
#include <boost/icl/split_interval_map.hpp>
|
||||
#include <boost/pool/pool.hpp>
|
||||
#include <boost/pool/pool_alloc.hpp>
|
||||
#include <boost/pool/poolfwd.hpp>
|
||||
|
||||
#include "common/range_sets.h"
|
||||
|
||||
namespace Common {
|
||||
|
||||
namespace {
|
||||
template <class T>
|
||||
using RangeSetsAllocator =
|
||||
boost::fast_pool_allocator<T, boost::default_user_allocator_new_delete,
|
||||
boost::details::pool::default_mutex, 1024, 2048>;
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
struct RangeSet<AddressType>::RangeSetImpl {
|
||||
using IntervalSet = boost::icl::interval_set<
|
||||
AddressType, std::less, ICL_INTERVAL_INSTANCE(ICL_INTERVAL_DEFAULT, AddressType, std::less),
|
||||
RangeSetsAllocator>;
|
||||
using IntervalType = typename IntervalSet::interval_type;
|
||||
|
||||
RangeSetImpl() = default;
|
||||
~RangeSetImpl() = default;
|
||||
|
||||
void Add(AddressType base_address, size_t size) {
|
||||
AddressType end_address = base_address + static_cast<AddressType>(size);
|
||||
IntervalType interval{base_address, end_address};
|
||||
m_ranges_set.add(interval);
|
||||
}
|
||||
|
||||
void Subtract(AddressType base_address, size_t size) {
|
||||
AddressType end_address = base_address + static_cast<AddressType>(size);
|
||||
IntervalType interval{base_address, end_address};
|
||||
m_ranges_set.subtract(interval);
|
||||
}
|
||||
|
||||
template <typename Func>
|
||||
void ForEach(Func&& func) const {
|
||||
if (m_ranges_set.empty()) {
|
||||
return;
|
||||
}
|
||||
auto it = m_ranges_set.begin();
|
||||
auto end_it = m_ranges_set.end();
|
||||
for (; it != end_it; it++) {
|
||||
const AddressType inter_addr_end = it->upper();
|
||||
const AddressType inter_addr = it->lower();
|
||||
func(inter_addr, inter_addr_end);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename Func>
|
||||
void ForEachInRange(AddressType base_addr, size_t size, Func&& func) const {
|
||||
if (m_ranges_set.empty()) {
|
||||
return;
|
||||
}
|
||||
const AddressType start_address = base_addr;
|
||||
const AddressType end_address = start_address + size;
|
||||
const RangeSetImpl::IntervalType search_interval{start_address, end_address};
|
||||
auto it = m_ranges_set.lower_bound(search_interval);
|
||||
if (it == m_ranges_set.end()) {
|
||||
return;
|
||||
}
|
||||
auto end_it = m_ranges_set.upper_bound(search_interval);
|
||||
for (; it != end_it; it++) {
|
||||
AddressType inter_addr_end = it->upper();
|
||||
AddressType inter_addr = it->lower();
|
||||
if (inter_addr_end > end_address) {
|
||||
inter_addr_end = end_address;
|
||||
}
|
||||
if (inter_addr < start_address) {
|
||||
inter_addr = start_address;
|
||||
}
|
||||
func(inter_addr, inter_addr_end);
|
||||
}
|
||||
}
|
||||
|
||||
IntervalSet m_ranges_set;
|
||||
};
|
||||
|
||||
template <typename AddressType>
|
||||
struct OverlapRangeSet<AddressType>::OverlapRangeSetImpl {
|
||||
using IntervalSet = boost::icl::split_interval_map<
|
||||
AddressType, s32, boost::icl::partial_enricher, std::less, boost::icl::inplace_plus,
|
||||
boost::icl::inter_section,
|
||||
ICL_INTERVAL_INSTANCE(ICL_INTERVAL_DEFAULT, AddressType, std::less), RangeSetsAllocator>;
|
||||
using IntervalType = typename IntervalSet::interval_type;
|
||||
|
||||
OverlapRangeSetImpl() = default;
|
||||
~OverlapRangeSetImpl() = default;
|
||||
|
||||
void Add(AddressType base_address, size_t size) {
|
||||
AddressType end_address = base_address + static_cast<AddressType>(size);
|
||||
IntervalType interval{base_address, end_address};
|
||||
m_split_ranges_set += std::make_pair(interval, 1);
|
||||
}
|
||||
|
||||
template <bool has_on_delete, typename Func>
|
||||
void Subtract(AddressType base_address, size_t size, s32 amount,
|
||||
[[maybe_unused]] Func&& on_delete) {
|
||||
if (m_split_ranges_set.empty()) {
|
||||
return;
|
||||
}
|
||||
AddressType end_address = base_address + static_cast<AddressType>(size);
|
||||
IntervalType interval{base_address, end_address};
|
||||
bool any_removals = false;
|
||||
m_split_ranges_set += std::make_pair(interval, -amount);
|
||||
do {
|
||||
any_removals = false;
|
||||
auto it = m_split_ranges_set.lower_bound(interval);
|
||||
if (it == m_split_ranges_set.end()) {
|
||||
return;
|
||||
}
|
||||
auto end_it = m_split_ranges_set.upper_bound(interval);
|
||||
for (; it != end_it; it++) {
|
||||
if (it->second <= 0) {
|
||||
if constexpr (has_on_delete) {
|
||||
if (it->second == 0) {
|
||||
on_delete(it->first.lower(), it->first.upper());
|
||||
}
|
||||
}
|
||||
any_removals = true;
|
||||
m_split_ranges_set.erase(it);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} while (any_removals);
|
||||
}
|
||||
|
||||
template <typename Func>
|
||||
void ForEach(Func&& func) const {
|
||||
if (m_split_ranges_set.empty()) {
|
||||
return;
|
||||
}
|
||||
auto it = m_split_ranges_set.begin();
|
||||
auto end_it = m_split_ranges_set.end();
|
||||
for (; it != end_it; it++) {
|
||||
const AddressType inter_addr_end = it->first.upper();
|
||||
const AddressType inter_addr = it->first.lower();
|
||||
func(inter_addr, inter_addr_end, it->second);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename Func>
|
||||
void ForEachInRange(AddressType base_address, size_t size, Func&& func) const {
|
||||
if (m_split_ranges_set.empty()) {
|
||||
return;
|
||||
}
|
||||
const AddressType start_address = base_address;
|
||||
const AddressType end_address = start_address + size;
|
||||
const OverlapRangeSetImpl::IntervalType search_interval{start_address, end_address};
|
||||
auto it = m_split_ranges_set.lower_bound(search_interval);
|
||||
if (it == m_split_ranges_set.end()) {
|
||||
return;
|
||||
}
|
||||
auto end_it = m_split_ranges_set.upper_bound(search_interval);
|
||||
for (; it != end_it; it++) {
|
||||
auto& inter = it->first;
|
||||
AddressType inter_addr_end = inter.upper();
|
||||
AddressType inter_addr = inter.lower();
|
||||
if (inter_addr_end > end_address) {
|
||||
inter_addr_end = end_address;
|
||||
}
|
||||
if (inter_addr < start_address) {
|
||||
inter_addr = start_address;
|
||||
}
|
||||
func(inter_addr, inter_addr_end, it->second);
|
||||
}
|
||||
}
|
||||
|
||||
IntervalSet m_split_ranges_set;
|
||||
};
|
||||
|
||||
template <typename AddressType>
|
||||
RangeSet<AddressType>::RangeSet() {
|
||||
m_impl = std::make_unique<RangeSet<AddressType>::RangeSetImpl>();
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
RangeSet<AddressType>::~RangeSet() = default;
|
||||
|
||||
template <typename AddressType>
|
||||
RangeSet<AddressType>::RangeSet(RangeSet&& other) {
|
||||
m_impl = std::make_unique<RangeSet<AddressType>::RangeSetImpl>();
|
||||
m_impl->m_ranges_set = std::move(other.m_impl->m_ranges_set);
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
RangeSet<AddressType>& RangeSet<AddressType>::operator=(RangeSet&& other) {
|
||||
m_impl->m_ranges_set = std::move(other.m_impl->m_ranges_set);
return *this;
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
void RangeSet<AddressType>::Add(AddressType base_address, size_t size) {
|
||||
m_impl->Add(base_address, size);
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
void RangeSet<AddressType>::Subtract(AddressType base_address, size_t size) {
|
||||
m_impl->Subtract(base_address, size);
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
void RangeSet<AddressType>::Clear() {
|
||||
m_impl->m_ranges_set.clear();
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
bool RangeSet<AddressType>::Empty() const {
|
||||
return m_impl->m_ranges_set.empty();
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
template <typename Func>
|
||||
void RangeSet<AddressType>::ForEach(Func&& func) const {
|
||||
m_impl->ForEach(std::move(func));
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
template <typename Func>
|
||||
void RangeSet<AddressType>::ForEachInRange(AddressType base_address, size_t size,
|
||||
Func&& func) const {
|
||||
m_impl->ForEachInRange(base_address, size, std::move(func));
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
OverlapRangeSet<AddressType>::OverlapRangeSet() {
|
||||
m_impl = std::make_unique<OverlapRangeSet<AddressType>::OverlapRangeSetImpl>();
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
OverlapRangeSet<AddressType>::~OverlapRangeSet() = default;
|
||||
|
||||
template <typename AddressType>
|
||||
OverlapRangeSet<AddressType>::OverlapRangeSet(OverlapRangeSet&& other) {
|
||||
m_impl = std::make_unique<OverlapRangeSet<AddressType>::OverlapRangeSetImpl>();
|
||||
m_impl->m_split_ranges_set = std::move(other.m_impl->m_split_ranges_set);
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
OverlapRangeSet<AddressType>& OverlapRangeSet<AddressType>::operator=(OverlapRangeSet&& other) {
|
||||
m_impl->m_split_ranges_set = std::move(other.m_impl->m_split_ranges_set);
return *this;
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
void OverlapRangeSet<AddressType>::Add(AddressType base_address, size_t size) {
|
||||
m_impl->Add(base_address, size);
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
void OverlapRangeSet<AddressType>::Subtract(AddressType base_address, size_t size) {
|
||||
m_impl->template Subtract<false>(base_address, size, 1, [](AddressType, AddressType) {});
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
template <typename Func>
|
||||
void OverlapRangeSet<AddressType>::Subtract(AddressType base_address, size_t size,
|
||||
Func&& on_delete) {
|
||||
m_impl->template Subtract<true, Func>(base_address, size, 1, std::move(on_delete));
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
void OverlapRangeSet<AddressType>::DeleteAll(AddressType base_address, size_t size) {
|
||||
m_impl->template Subtract<false>(base_address, size, std::numeric_limits<s32>::max(),
|
||||
[](AddressType, AddressType) {});
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
void OverlapRangeSet<AddressType>::Clear() {
|
||||
m_impl->m_split_ranges_set.clear();
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
bool OverlapRangeSet<AddressType>::Empty() const {
|
||||
return m_impl->m_split_ranges_set.empty();
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
template <typename Func>
|
||||
void OverlapRangeSet<AddressType>::ForEach(Func&& func) const {
|
||||
m_impl->ForEach(func);
|
||||
}
|
||||
|
||||
template <typename AddressType>
|
||||
template <typename Func>
|
||||
void OverlapRangeSet<AddressType>::ForEachInRange(AddressType base_address, size_t size,
|
||||
Func&& func) const {
|
||||
m_impl->ForEachInRange(base_address, size, std::move(func));
|
||||
}
|
||||
|
||||
} // namespace Common
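An illustrative sketch of the reference counting implemented above (not part of the file; same include assumptions as the header sketch):

    Common::OverlapRangeSet<u64> overlaps;
    overlaps.Add(0x0, 0x100);
    overlaps.Add(0x0, 0x100); // the same region is now referenced twice
    overlaps.Subtract(0x0, 0x100, [](u64, u64) { /* not called: count drops to 1 */ });
    overlaps.Subtract(0x0, 0x100, [](u64 begin, u64 end) {
        // called now that the count reached zero; the piece is erased
    });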
|
@ -30,6 +30,7 @@ namespace Settings {
|
||||
#define SETTING(TYPE, RANGED) template class Setting<TYPE, RANGED>
|
||||
#define SWITCHABLE(TYPE, RANGED) template class SwitchableSetting<TYPE, RANGED>
|
||||
|
||||
SETTING(AppletMode, false);
|
||||
SETTING(AudioEngine, false);
|
||||
SETTING(bool, false);
|
||||
SETTING(int, false);
|
||||
@ -215,6 +216,8 @@ const char* TranslateCategory(Category category) {
|
||||
return "Debugging";
|
||||
case Category::GpuDriver:
|
||||
return "GpuDriver";
|
||||
case Category::LibraryApplet:
|
||||
return "LibraryApplet";
|
||||
case Category::Miscellaneous:
|
||||
return "Miscellaneous";
|
||||
case Category::Network:
|
||||
|
@ -133,6 +133,38 @@ struct TouchFromButtonMap {
|
||||
struct Values {
|
||||
Linkage linkage{};
|
||||
|
||||
// Applet
|
||||
Setting<AppletMode> cabinet_applet_mode{linkage, AppletMode::LLE, "cabinet_applet_mode",
|
||||
Category::LibraryApplet};
|
||||
Setting<AppletMode> controller_applet_mode{linkage, AppletMode::HLE, "controller_applet_mode",
|
||||
Category::LibraryApplet};
|
||||
Setting<AppletMode> data_erase_applet_mode{linkage, AppletMode::HLE, "data_erase_applet_mode",
|
||||
Category::LibraryApplet};
|
||||
Setting<AppletMode> error_applet_mode{linkage, AppletMode::HLE, "error_applet_mode",
|
||||
Category::LibraryApplet};
|
||||
Setting<AppletMode> net_connect_applet_mode{linkage, AppletMode::HLE, "net_connect_applet_mode",
|
||||
Category::LibraryApplet};
|
||||
Setting<AppletMode> player_select_applet_mode{
|
||||
linkage, AppletMode::HLE, "player_select_applet_mode", Category::LibraryApplet};
|
||||
Setting<AppletMode> swkbd_applet_mode{linkage, AppletMode::LLE, "swkbd_applet_mode",
|
||||
Category::LibraryApplet};
|
||||
Setting<AppletMode> mii_edit_applet_mode{linkage, AppletMode::LLE, "mii_edit_applet_mode",
|
||||
Category::LibraryApplet};
|
||||
Setting<AppletMode> web_applet_mode{linkage, AppletMode::HLE, "web_applet_mode",
|
||||
Category::LibraryApplet};
|
||||
Setting<AppletMode> shop_applet_mode{linkage, AppletMode::HLE, "shop_applet_mode",
|
||||
Category::LibraryApplet};
|
||||
Setting<AppletMode> photo_viewer_applet_mode{
|
||||
linkage, AppletMode::LLE, "photo_viewer_applet_mode", Category::LibraryApplet};
|
||||
Setting<AppletMode> offline_web_applet_mode{linkage, AppletMode::LLE, "offline_web_applet_mode",
|
||||
Category::LibraryApplet};
|
||||
Setting<AppletMode> login_share_applet_mode{linkage, AppletMode::HLE, "login_share_applet_mode",
|
||||
Category::LibraryApplet};
|
||||
Setting<AppletMode> wifi_web_auth_applet_mode{
|
||||
linkage, AppletMode::HLE, "wifi_web_auth_applet_mode", Category::LibraryApplet};
|
||||
Setting<AppletMode> my_page_applet_mode{linkage, AppletMode::LLE, "my_page_applet_mode",
|
||||
Category::LibraryApplet};
|
||||
|
||||
// Audio
|
||||
SwitchableSetting<AudioEngine> sink_id{linkage, AudioEngine::Auto, "output_engine",
|
||||
Category::Audio, Specialization::RuntimeList};
|
||||
|
@ -44,6 +44,7 @@ enum class Category : u32 {
|
||||
Services,
|
||||
Paths,
|
||||
Linux,
|
||||
LibraryApplet,
|
||||
MaxEnum,
|
||||
};
|
||||
|
||||
|
@ -151,6 +151,8 @@ ENUM(AspectRatio, R16_9, R4_3, R21_9, R16_10, Stretch);
|
||||
|
||||
ENUM(ConsoleMode, Handheld, Docked);
|
||||
|
||||
ENUM(AppletMode, HLE, LLE);
|
||||
|
||||
template <typename Type>
|
||||
inline std::string CanonicalizeEnum(Type id) {
|
||||
const auto group = EnumMetadata<Type>::Canonicalizations();
|
||||
|
@ -14,7 +14,7 @@
|
||||
#include "common/common_types.h"
|
||||
#include "common/polyfill_ranges.h"
|
||||
|
||||
namespace VideoCommon {
|
||||
namespace Common {
|
||||
|
||||
struct SlotId {
|
||||
static constexpr u32 INVALID_INDEX = std::numeric_limits<u32>::max();
|
||||
@ -217,11 +217,11 @@ private:
|
||||
std::vector<u32> free_list;
|
||||
};
|
||||
|
||||
} // namespace VideoCommon
|
||||
} // namespace Common
|
||||
|
||||
template <>
|
||||
struct std::hash<VideoCommon::SlotId> {
|
||||
size_t operator()(const VideoCommon::SlotId& id) const noexcept {
|
||||
struct std::hash<Common::SlotId> {
|
||||
size_t operator()(const Common::SlotId& id) const noexcept {
|
||||
return std::hash<u32>{}(id.index);
|
||||
}
|
||||
};
|
@ -43,6 +43,8 @@ public:
|
||||
DeviceMemoryManager(const DeviceMemory& device_memory);
|
||||
~DeviceMemoryManager();
|
||||
|
||||
static constexpr bool HAS_FLUSH_INVALIDATION = true;
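// Queried by GuestMemory: interfaces that set this to false skip the FlushRegion
// call when performing Safe reads.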
|
||||
|
||||
void BindInterface(DeviceInterface* device_inter);
|
||||
|
||||
DAddr Allocate(size_t size);
|
||||
|
@ -44,15 +44,32 @@ public:
|
||||
GuestMemory() = delete;
|
||||
explicit GuestMemory(M& memory, u64 addr, std::size_t size,
|
||||
Common::ScratchBuffer<T>* backup = nullptr)
|
||||
: m_memory{memory}, m_addr{addr}, m_size{size} {
|
||||
: m_memory{&memory}, m_addr{addr}, m_size{size} {
|
||||
static_assert(FLAGS & GuestMemoryFlags::Read || FLAGS & GuestMemoryFlags::Write);
|
||||
if constexpr (FLAGS & GuestMemoryFlags::Read) {
|
||||
if constexpr (!(FLAGS & GuestMemoryFlags::Read)) {
|
||||
if (!this->TrySetSpan()) {
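// No direct host span for this write-only mapping: stage the data in the
// caller-provided scratch buffer, or fall back to an internal copy.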
|
||||
if (backup) {
|
||||
backup->resize_destructive(this->size());
|
||||
m_data_span = *backup;
|
||||
m_span_valid = true;
|
||||
m_is_data_copy = true;
|
||||
} else {
|
||||
m_data_copy.resize(this->size());
|
||||
m_data_span = std::span(m_data_copy);
|
||||
m_span_valid = true;
|
||||
m_is_data_copy = true;
|
||||
}
|
||||
}
|
||||
} else if constexpr (FLAGS & GuestMemoryFlags::Read) {
|
||||
Read(addr, size, backup);
|
||||
}
|
||||
}
|
||||
|
||||
~GuestMemory() = default;
|
||||
|
||||
GuestMemory(GuestMemory&& rhs) = default;
|
||||
GuestMemory& operator=(GuestMemory&& rhs) = default;
|
||||
|
||||
T* data() noexcept {
|
||||
return m_data_span.data();
|
||||
}
|
||||
@ -109,8 +126,8 @@ public:
|
||||
}
|
||||
|
||||
if (this->TrySetSpan()) {
|
||||
if constexpr (FLAGS & GuestMemoryFlags::Safe) {
|
||||
m_memory.FlushRegion(m_addr, this->size_bytes());
|
||||
if constexpr (FLAGS & GuestMemoryFlags::Safe && M::HAS_FLUSH_INVALIDATION) {
|
||||
m_memory->FlushRegion(m_addr, this->size_bytes());
|
||||
}
|
||||
} else {
|
||||
if (backup) {
|
||||
@ -123,9 +140,9 @@ public:
|
||||
m_is_data_copy = true;
|
||||
m_span_valid = true;
|
||||
if constexpr (FLAGS & GuestMemoryFlags::Safe) {
|
||||
m_memory.ReadBlock(m_addr, this->data(), this->size_bytes());
|
||||
m_memory->ReadBlock(m_addr, this->data(), this->size_bytes());
|
||||
} else {
|
||||
m_memory.ReadBlockUnsafe(m_addr, this->data(), this->size_bytes());
|
||||
m_memory->ReadBlockUnsafe(m_addr, this->data(), this->size_bytes());
|
||||
}
|
||||
}
|
||||
return m_data_span;
|
||||
@ -133,18 +150,19 @@ public:
|
||||
|
||||
void Write(std::span<T> write_data) noexcept {
|
||||
if constexpr (FLAGS & GuestMemoryFlags::Cached) {
|
||||
m_memory.WriteBlockCached(m_addr, write_data.data(), this->size_bytes());
|
||||
m_memory->WriteBlockCached(m_addr, write_data.data(), this->size_bytes());
|
||||
} else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
|
||||
m_memory.WriteBlock(m_addr, write_data.data(), this->size_bytes());
|
||||
m_memory->WriteBlock(m_addr, write_data.data(), this->size_bytes());
|
||||
} else {
|
||||
m_memory.WriteBlockUnsafe(m_addr, write_data.data(), this->size_bytes());
|
||||
m_memory->WriteBlockUnsafe(m_addr, write_data.data(), this->size_bytes());
|
||||
}
|
||||
}
|
||||
|
||||
bool TrySetSpan() noexcept {
|
||||
if (u8* ptr = m_memory.GetSpan(m_addr, this->size_bytes()); ptr) {
|
||||
if (u8* ptr = m_memory->GetSpan(m_addr, this->size_bytes()); ptr) {
|
||||
m_data_span = {reinterpret_cast<T*>(ptr), this->size()};
|
||||
m_span_valid = true;
|
||||
m_is_data_copy = false;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@ -159,7 +177,7 @@ protected:
|
||||
return m_addr_changed;
|
||||
}
|
||||
|
||||
M& m_memory;
|
||||
M* m_memory;
|
||||
u64 m_addr{};
|
||||
size_t m_size{};
|
||||
std::span<T> m_data_span{};
|
||||
@ -175,17 +193,7 @@ public:
|
||||
GuestMemoryScoped() = delete;
|
||||
explicit GuestMemoryScoped(M& memory, u64 addr, std::size_t size,
|
||||
Common::ScratchBuffer<T>* backup = nullptr)
|
||||
: GuestMemory<M, T, FLAGS>(memory, addr, size, backup) {
|
||||
if constexpr (!(FLAGS & GuestMemoryFlags::Read)) {
|
||||
if (!this->TrySetSpan()) {
|
||||
if (backup) {
|
||||
this->m_data_span = *backup;
|
||||
this->m_span_valid = true;
|
||||
this->m_is_data_copy = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
: GuestMemory<M, T, FLAGS>(memory, addr, size, backup) {}
|
||||
|
||||
~GuestMemoryScoped() {
|
||||
if constexpr (FLAGS & GuestMemoryFlags::Write) {
|
||||
@ -196,15 +204,17 @@ public:
|
||||
if (this->AddressChanged() || this->IsDataCopy()) {
|
||||
ASSERT(this->m_span_valid);
|
||||
if constexpr (FLAGS & GuestMemoryFlags::Cached) {
|
||||
this->m_memory.WriteBlockCached(this->m_addr, this->data(), this->size_bytes());
|
||||
this->m_memory->WriteBlockCached(this->m_addr, this->data(),
|
||||
this->size_bytes());
|
||||
} else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
|
||||
this->m_memory.WriteBlock(this->m_addr, this->data(), this->size_bytes());
|
||||
this->m_memory->WriteBlock(this->m_addr, this->data(), this->size_bytes());
|
||||
} else {
|
||||
this->m_memory.WriteBlockUnsafe(this->m_addr, this->data(), this->size_bytes());
|
||||
this->m_memory->WriteBlockUnsafe(this->m_addr, this->data(),
|
||||
this->size_bytes());
|
||||
}
|
||||
} else if constexpr ((FLAGS & GuestMemoryFlags::Safe) ||
|
||||
(FLAGS & GuestMemoryFlags::Cached)) {
|
||||
this->m_memory.InvalidateRegion(this->m_addr, this->size_bytes());
|
||||
this->m_memory->InvalidateRegion(this->m_addr, this->size_bytes());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -4,8 +4,9 @@
|
||||
#include <random>
|
||||
#include "common/scope_exit.h"
|
||||
#include "common/settings.h"
|
||||
#include "core/arm/dynarmic/arm_dynarmic.h"
|
||||
#include "core/arm/dynarmic/dynarmic_exclusive_monitor.h"
|
||||
#include "core/core.h"
|
||||
#include "core/gpu_dirty_memory_manager.h"
|
||||
#include "core/hle/kernel/k_process.h"
|
||||
#include "core/hle/kernel/k_scoped_resource_reservation.h"
|
||||
#include "core/hle/kernel/k_shared_memory.h"
|
||||
@ -1258,6 +1259,10 @@ void KProcess::InitializeInterfaces() {
|
||||
|
||||
#ifdef HAS_NCE
|
||||
if (this->IsApplication() && Settings::IsNceEnabled()) {
|
||||
// Register the scoped JIT handler before creating any NCE instances
|
||||
// so that its signal handler will appear first in the signal chain.
|
||||
Core::ScopedJitExecution::RegisterHandler();
|
||||
|
||||
for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
|
||||
m_arm_interfaces[i] = std::make_unique<Core::ArmNce>(m_kernel.System(), true, i);
|
||||
}
|
||||
|
@ -130,9 +130,9 @@ enum class AppletProgramId : u64 {
|
||||
|
||||
enum class LibraryAppletMode : u32 {
|
||||
AllForeground = 0,
|
||||
Background = 1,
|
||||
NoUI = 2,
|
||||
BackgroundIndirectDisplay = 3,
|
||||
PartialForeground = 1,
|
||||
NoUi = 2,
|
||||
PartialForegroundIndirectDisplay = 3,
|
||||
AllForegroundInitiallyHidden = 4,
|
||||
};
|
||||
|
||||
|
@ -68,9 +68,9 @@ void SoftwareKeyboard::Initialize() {
|
||||
case LibraryAppletMode::AllForeground:
|
||||
InitializeForeground();
|
||||
break;
|
||||
case LibraryAppletMode::Background:
|
||||
case LibraryAppletMode::BackgroundIndirectDisplay:
|
||||
InitializeBackground(applet_mode);
|
||||
case LibraryAppletMode::PartialForeground:
|
||||
case LibraryAppletMode::PartialForegroundIndirectDisplay:
|
||||
InitializePartialForeground(applet_mode);
|
||||
break;
|
||||
default:
|
||||
ASSERT_MSG(false, "Invalid LibraryAppletMode={}", applet_mode);
|
||||
@ -243,7 +243,7 @@ void SoftwareKeyboard::InitializeForeground() {
|
||||
InitializeFrontendNormalKeyboard();
|
||||
}
|
||||
|
||||
void SoftwareKeyboard::InitializeBackground(LibraryAppletMode library_applet_mode) {
|
||||
void SoftwareKeyboard::InitializePartialForeground(LibraryAppletMode library_applet_mode) {
|
||||
LOG_INFO(Service_AM, "Initializing Inline Software Keyboard Applet.");
|
||||
|
||||
is_background = true;
|
||||
@ -258,9 +258,9 @@ void SoftwareKeyboard::InitializeBackground(LibraryAppletMode library_applet_mod
|
||||
swkbd_inline_initialize_arg.size());
|
||||
|
||||
if (swkbd_initialize_arg.library_applet_mode_flag) {
|
||||
ASSERT(library_applet_mode == LibraryAppletMode::Background);
|
||||
ASSERT(library_applet_mode == LibraryAppletMode::PartialForeground);
|
||||
} else {
|
||||
ASSERT(library_applet_mode == LibraryAppletMode::BackgroundIndirectDisplay);
|
||||
ASSERT(library_applet_mode == LibraryAppletMode::PartialForegroundIndirectDisplay);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -62,7 +62,7 @@ private:
|
||||
void InitializeForeground();
|
||||
|
||||
/// Initializes the inline software keyboard.
|
||||
void InitializeBackground(LibraryAppletMode library_applet_mode);
|
||||
void InitializePartialForeground(LibraryAppletMode library_applet_mode);
|
||||
|
||||
/// Processes the text check sent by the application.
|
||||
void ProcessTextCheck();
|
||||
|
@ -1,6 +1,7 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2024 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include "common/settings.h"
|
||||
#include "core/hle/kernel/k_transfer_memory.h"
|
||||
#include "core/hle/service/am/applet_data_broker.h"
|
||||
#include "core/hle/service/am/applet_manager.h"
|
||||
@ -16,6 +17,34 @@ namespace Service::AM {
|
||||
|
||||
namespace {
|
||||
|
||||
bool ShouldCreateGuestApplet(AppletId applet_id) {
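// Each X(Name, name) expansion below returns false when the matching
// name##_applet_mode setting is not LLE, so only applets configured for LLE are
// created as guest applets; everything else falls back to the frontend (HLE) path.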
|
||||
#define X(Name, name) \
|
||||
if (applet_id == AppletId::Name && \
|
||||
Settings::values.name##_applet_mode.GetValue() != Settings::AppletMode::LLE) { \
|
||||
return false; \
|
||||
}
|
||||
|
||||
X(Cabinet, cabinet)
|
||||
X(Controller, controller)
|
||||
X(DataErase, data_erase)
|
||||
X(Error, error)
|
||||
X(NetConnect, net_connect)
|
||||
X(ProfileSelect, player_select)
|
||||
X(SoftwareKeyboard, swkbd)
|
||||
X(MiiEdit, mii_edit)
|
||||
X(Web, web)
|
||||
X(Shop, shop)
|
||||
X(PhotoViewer, photo_viewer)
|
||||
X(OfflineWeb, offline_web)
|
||||
X(LoginShare, login_share)
|
||||
X(WebAuth, wifi_web_auth)
|
||||
X(MyPage, my_page)
|
||||
|
||||
#undef X
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
AppletProgramId AppletIdToProgramId(AppletId applet_id) {
|
||||
switch (applet_id) {
|
||||
case AppletId::OverlayDisplay:
|
||||
@ -63,9 +92,10 @@ AppletProgramId AppletIdToProgramId(AppletId applet_id) {
|
||||
}
|
||||
}
|
||||
|
||||
[[maybe_unused]] std::shared_ptr<ILibraryAppletAccessor> CreateGuestApplet(
|
||||
Core::System& system, std::shared_ptr<Applet> caller_applet, AppletId applet_id,
|
||||
LibraryAppletMode mode) {
|
||||
std::shared_ptr<ILibraryAppletAccessor> CreateGuestApplet(Core::System& system,
|
||||
std::shared_ptr<Applet> caller_applet,
|
||||
AppletId applet_id,
|
||||
LibraryAppletMode mode) {
|
||||
const auto program_id = static_cast<u64>(AppletIdToProgramId(applet_id));
|
||||
if (program_id == 0) {
|
||||
// Unknown applet
|
||||
@ -87,24 +117,18 @@ AppletProgramId AppletIdToProgramId(AppletId applet_id) {
|
||||
// Set focus state
|
||||
switch (mode) {
|
||||
case LibraryAppletMode::AllForeground:
|
||||
case LibraryAppletMode::NoUI:
|
||||
applet->focus_state = FocusState::InFocus;
|
||||
case LibraryAppletMode::NoUi:
|
||||
case LibraryAppletMode::PartialForeground:
|
||||
case LibraryAppletMode::PartialForegroundIndirectDisplay:
|
||||
applet->hid_registration.EnableAppletToGetInput(true);
|
||||
applet->focus_state = FocusState::InFocus;
|
||||
applet->message_queue.PushMessage(AppletMessageQueue::AppletMessage::ChangeIntoForeground);
|
||||
applet->message_queue.PushMessage(AppletMessageQueue::AppletMessage::FocusStateChanged);
|
||||
break;
|
||||
case LibraryAppletMode::AllForegroundInitiallyHidden:
|
||||
applet->system_buffer_manager.SetWindowVisibility(false);
|
||||
applet->focus_state = FocusState::NotInFocus;
|
||||
applet->hid_registration.EnableAppletToGetInput(false);
|
||||
applet->message_queue.PushMessage(AppletMessageQueue::AppletMessage::FocusStateChanged);
|
||||
break;
|
||||
case LibraryAppletMode::Background:
|
||||
case LibraryAppletMode::BackgroundIndirectDisplay:
|
||||
default:
|
||||
applet->focus_state = FocusState::Background;
|
||||
applet->hid_registration.EnableAppletToGetInput(true);
|
||||
applet->message_queue.PushMessage(AppletMessageQueue::AppletMessage::FocusStateChanged);
|
||||
applet->focus_state = FocusState::NotInFocus;
|
||||
applet->system_buffer_manager.SetWindowVisibility(false);
|
||||
applet->message_queue.PushMessage(AppletMessageQueue::AppletMessage::ChangeIntoBackground);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -117,9 +141,10 @@ AppletProgramId AppletIdToProgramId(AppletId applet_id) {
|
||||
return std::make_shared<ILibraryAppletAccessor>(system, broker, applet);
|
||||
}
|
||||
|
||||
[[maybe_unused]] std::shared_ptr<ILibraryAppletAccessor> CreateFrontendApplet(
|
||||
Core::System& system, std::shared_ptr<Applet> caller_applet, AppletId applet_id,
|
||||
LibraryAppletMode mode) {
|
||||
std::shared_ptr<ILibraryAppletAccessor> CreateFrontendApplet(Core::System& system,
|
||||
std::shared_ptr<Applet> caller_applet,
|
||||
AppletId applet_id,
|
||||
LibraryAppletMode mode) {
|
||||
const auto program_id = static_cast<u64>(AppletIdToProgramId(applet_id));
|
||||
|
||||
auto process = std::make_unique<Process>(system);
|
||||
@ -163,7 +188,13 @@ void ILibraryAppletCreator::CreateLibraryApplet(HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_AM, "called with applet_id={:08X}, applet_mode={:08X}", applet_id,
|
||||
applet_mode);
|
||||
|
||||
auto library_applet = CreateFrontendApplet(system, applet, applet_id, applet_mode);
|
||||
std::shared_ptr<ILibraryAppletAccessor> library_applet;
|
||||
if (ShouldCreateGuestApplet(applet_id)) {
|
||||
library_applet = CreateGuestApplet(system, applet, applet_id, applet_mode);
|
||||
}
|
||||
if (!library_applet) {
|
||||
library_applet = CreateFrontendApplet(system, applet, applet_id, applet_mode);
|
||||
}
|
||||
if (!library_applet) {
|
||||
LOG_ERROR(Service_AM, "Applet doesn't exist! applet_id={}", applet_id);
|
||||
|
||||
|
@ -1,10 +1,13 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2024 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include "common/logging/log.h"
|
||||
#include "core/hle/result.h"
|
||||
#include "core/hle/service/am/am_results.h"
|
||||
#include "core/hle/service/am/frontend/applets.h"
|
||||
#include "core/hle/service/am/self_controller.h"
|
||||
#include "core/hle/service/caps/caps_su.h"
|
||||
#include "core/hle/service/hle_ipc.h"
|
||||
#include "core/hle/service/ipc_helpers.h"
|
||||
#include "core/hle/service/nvnflinger/fb_share_buffer_manager.h"
|
||||
#include "core/hle/service/nvnflinger/nvnflinger.h"
|
||||
@ -47,7 +50,7 @@ ISelfController::ISelfController(Core::System& system_, std::shared_ptr<Applet>
|
||||
{50, &ISelfController::SetHandlesRequestToDisplay, "SetHandlesRequestToDisplay"},
|
||||
{51, &ISelfController::ApproveToDisplay, "ApproveToDisplay"},
|
||||
{60, nullptr, "OverrideAutoSleepTimeAndDimmingTime"},
|
||||
{61, nullptr, "SetMediaPlaybackState"},
|
||||
{61, &ISelfController::SetMediaPlaybackState, "SetMediaPlaybackState"},
|
||||
{62, &ISelfController::SetIdleTimeDetectionExtension, "SetIdleTimeDetectionExtension"},
|
||||
{63, &ISelfController::GetIdleTimeDetectionExtension, "GetIdleTimeDetectionExtension"},
|
||||
{64, nullptr, "SetInputDetectionSourceSet"},
|
||||
@ -288,7 +291,8 @@ void ISelfController::GetSystemSharedBufferHandle(HLERequestContext& ctx) {
|
||||
}
|
||||
|
||||
Result ISelfController::EnsureBufferSharingEnabled(Kernel::KProcess* process) {
|
||||
if (applet->system_buffer_manager.Initialize(&nvnflinger, process, applet->applet_id)) {
|
||||
if (applet->system_buffer_manager.Initialize(&nvnflinger, process, applet->applet_id,
|
||||
applet->library_applet_mode)) {
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
@ -323,6 +327,16 @@ void ISelfController::ApproveToDisplay(HLERequestContext& ctx) {
|
||||
rb.Push(ResultSuccess);
|
||||
}
|
||||
|
||||
void ISelfController::SetMediaPlaybackState(HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const u8 state = rp.Pop<u8>();
|
||||
|
||||
LOG_WARNING(Service_AM, "(STUBBED) called, state={}", state);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ResultSuccess);
|
||||
}
|
||||
|
||||
void ISelfController::SetIdleTimeDetectionExtension(HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
|
||||
|
@ -3,6 +3,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "core/hle/service/hle_ipc.h"
|
||||
#include "core/hle/service/kernel_helpers.h"
|
||||
#include "core/hle/service/service.h"
|
||||
|
||||
@ -38,6 +39,7 @@ private:
|
||||
void CreateManagedDisplaySeparableLayer(HLERequestContext& ctx);
|
||||
void SetHandlesRequestToDisplay(HLERequestContext& ctx);
|
||||
void ApproveToDisplay(HLERequestContext& ctx);
|
||||
void SetMediaPlaybackState(HLERequestContext& ctx);
|
||||
void SetIdleTimeDetectionExtension(HLERequestContext& ctx);
|
||||
void GetIdleTimeDetectionExtension(HLERequestContext& ctx);
|
||||
void ReportUserIsActive(HLERequestContext& ctx);
|
||||
|
@ -17,11 +17,12 @@ SystemBufferManager::~SystemBufferManager() {
|
||||
|
||||
// Clean up shared layers.
|
||||
if (m_buffer_sharing_enabled) {
|
||||
m_nvnflinger->GetSystemBufferManager().Finalize(m_process);
|
||||
}
|
||||
}
|
||||
|
||||
bool SystemBufferManager::Initialize(Nvnflinger::Nvnflinger* nvnflinger, Kernel::KProcess* process,
|
||||
AppletId applet_id) {
|
||||
AppletId applet_id, LibraryAppletMode mode) {
|
||||
if (m_nvnflinger) {
|
||||
return m_buffer_sharing_enabled;
|
||||
}
|
||||
@ -36,9 +37,15 @@ bool SystemBufferManager::Initialize(Nvnflinger::Nvnflinger* nvnflinger, Kernel:
|
||||
return false;
|
||||
}
|
||||
|
||||
Nvnflinger::LayerBlending blending = Nvnflinger::LayerBlending::None;
|
||||
if (mode == LibraryAppletMode::PartialForeground ||
|
||||
mode == LibraryAppletMode::PartialForegroundIndirectDisplay) {
|
||||
blending = Nvnflinger::LayerBlending::Coverage;
|
||||
}
|
||||
|
||||
const auto display_id = m_nvnflinger->OpenDisplay("Default").value();
|
||||
const auto res = m_nvnflinger->GetSystemBufferManager().Initialize(
|
||||
&m_system_shared_buffer_id, &m_system_shared_layer_id, display_id);
|
||||
m_process, &m_system_shared_buffer_id, &m_system_shared_layer_id, display_id, blending);
|
||||
|
||||
if (res.IsSuccess()) {
|
||||
m_buffer_sharing_enabled = true;
|
||||
@ -62,8 +69,12 @@ void SystemBufferManager::SetWindowVisibility(bool visible) {
|
||||
|
||||
Result SystemBufferManager::WriteAppletCaptureBuffer(bool* out_was_written,
|
||||
s32* out_fbshare_layer_index) {
|
||||
// TODO
|
||||
R_SUCCEED();
|
||||
if (!m_buffer_sharing_enabled) {
|
||||
return VI::ResultPermissionDenied;
|
||||
}
|
||||
|
||||
return m_nvnflinger->GetSystemBufferManager().WriteAppletCaptureBuffer(out_was_written,
|
||||
out_fbshare_layer_index);
|
||||
}
|
||||
|
||||
} // namespace Service::AM
|
||||
|
@ -27,7 +27,8 @@ public:
|
||||
SystemBufferManager();
|
||||
~SystemBufferManager();
|
||||
|
||||
bool Initialize(Nvnflinger::Nvnflinger* flinger, Kernel::KProcess* process, AppletId applet_id);
|
||||
bool Initialize(Nvnflinger::Nvnflinger* flinger, Kernel::KProcess* process, AppletId applet_id,
|
||||
LibraryAppletMode mode);
|
||||
|
||||
void GetSystemSharedLayerHandle(u64* out_system_shared_buffer_id,
|
||||
u64* out_system_shared_layer_id) {
|
||||
|
@ -62,12 +62,12 @@ void IWindowController::SetAppletWindowVisibility(HLERequestContext& ctx) {
|
||||
applet->hid_registration.EnableAppletToGetInput(visible);
|
||||
|
||||
if (visible) {
|
||||
applet->message_queue.PushMessage(AppletMessageQueue::AppletMessage::ChangeIntoForeground);
|
||||
applet->focus_state = FocusState::InFocus;
|
||||
applet->message_queue.PushMessage(AppletMessageQueue::AppletMessage::ChangeIntoForeground);
|
||||
} else {
|
||||
applet->focus_state = FocusState::NotInFocus;
|
||||
applet->message_queue.PushMessage(AppletMessageQueue::AppletMessage::ChangeIntoBackground);
|
||||
}
|
||||
applet->message_queue.PushMessage(AppletMessageQueue::AppletMessage::FocusStateChanged);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ResultSuccess);
|
||||
|
@ -49,6 +49,7 @@ SessionId Container::OpenSession(Kernel::KProcess* process) {
|
||||
continue;
|
||||
}
|
||||
if (session.process == process) {
|
||||
session.ref_count++;
|
||||
return session.id;
|
||||
}
|
||||
}
|
||||
@ -66,6 +67,7 @@ SessionId Container::OpenSession(Kernel::KProcess* process) {
|
||||
}
|
||||
auto& session = impl->sessions[new_id];
|
||||
session.is_active = true;
|
||||
session.ref_count = 1;
|
||||
// Optimization
|
||||
if (process->IsApplication()) {
|
||||
auto& page_table = process->GetPageTable().GetBasePageTable();
|
||||
@ -114,8 +116,11 @@ SessionId Container::OpenSession(Kernel::KProcess* process) {
|
||||
|
||||
void Container::CloseSession(SessionId session_id) {
|
||||
std::scoped_lock lk(impl->session_guard);
|
||||
impl->file.UnmapAllHandles(session_id);
|
||||
auto& session = impl->sessions[session_id.id];
|
||||
if (--session.ref_count > 0) {
|
||||
return;
|
||||
}
|
||||
impl->file.UnmapAllHandles(session_id);
|
||||
auto& smmu = impl->host1x.MemoryManager();
|
||||
if (session.has_preallocated_area) {
|
||||
const DAddr region_start = session.mapper->GetRegionStart();
|
||||
|
@ -46,6 +46,7 @@ struct Session {
|
||||
bool has_preallocated_area{};
|
||||
std::unique_ptr<HeapMapper> mapper{};
|
||||
bool is_active{};
|
||||
s32 ref_count{};
|
||||
};
|
||||
|
||||
class Container {
|
||||
@ -67,10 +68,7 @@ public:
|
||||
const SyncpointManager& GetSyncpointManager() const;
|
||||
|
||||
struct Host1xDeviceFileData {
|
||||
std::unordered_map<DeviceFD, u32> fd_to_id{};
|
||||
std::deque<u32> syncpts_accumulated{};
|
||||
u32 nvdec_next_id{};
|
||||
u32 vic_next_id{};
|
||||
};
|
||||
|
||||
Host1xDeviceFileData& Host1xDeviceFile();
|
||||
|
@ -3,110 +3,21 @@
|
||||
|
||||
#include <mutex>
|
||||
|
||||
#include <boost/container/small_vector.hpp>
|
||||
#define BOOST_NO_MT
|
||||
#include <boost/pool/detail/mutex.hpp>
|
||||
#undef BOOST_NO_MT
|
||||
#include <boost/icl/interval.hpp>
|
||||
#include <boost/icl/interval_base_set.hpp>
|
||||
#include <boost/icl/interval_set.hpp>
|
||||
#include <boost/icl/split_interval_map.hpp>
|
||||
#include <boost/pool/pool.hpp>
|
||||
#include <boost/pool/pool_alloc.hpp>
|
||||
#include <boost/pool/poolfwd.hpp>
|
||||
|
||||
#include "common/range_sets.h"
|
||||
#include "common/range_sets.inc"
|
||||
#include "core/hle/service/nvdrv/core/heap_mapper.h"
|
||||
#include "video_core/host1x/host1x.h"
|
||||
|
||||
namespace boost {
|
||||
template <typename T>
|
||||
class fast_pool_allocator<T, default_user_allocator_new_delete, details::pool::null_mutex, 4096, 0>;
|
||||
}
|
||||
|
||||
namespace Service::Nvidia::NvCore {
|
||||
|
||||
using IntervalCompare = std::less<DAddr>;
|
||||
using IntervalInstance = boost::icl::interval_type_default<DAddr, std::less>;
|
||||
using IntervalAllocator = boost::fast_pool_allocator<DAddr>;
|
||||
using IntervalSet = boost::icl::interval_set<DAddr>;
|
||||
using IntervalType = typename IntervalSet::interval_type;
|
||||
|
||||
template <typename Type>
|
||||
struct counter_add_functor : public boost::icl::identity_based_inplace_combine<Type> {
|
||||
// types
|
||||
typedef counter_add_functor<Type> type;
|
||||
typedef boost::icl::identity_based_inplace_combine<Type> base_type;
|
||||
|
||||
// public member functions
|
||||
void operator()(Type& current, const Type& added) const {
|
||||
current += added;
|
||||
if (current < base_type::identity_element()) {
|
||||
current = base_type::identity_element();
|
||||
}
|
||||
}
|
||||
|
||||
// public static functions
|
||||
static void version(Type&){};
|
||||
};
|
||||
|
||||
using OverlapCombine = counter_add_functor<int>;
|
||||
using OverlapSection = boost::icl::inter_section<int>;
|
||||
using OverlapCounter = boost::icl::split_interval_map<DAddr, int>;
|
||||
|
||||
struct HeapMapper::HeapMapperInternal {
|
||||
HeapMapperInternal(Tegra::Host1x::Host1x& host1x) : device_memory{host1x.MemoryManager()} {}
|
||||
HeapMapperInternal(Tegra::Host1x::Host1x& host1x) : m_device_memory{host1x.MemoryManager()} {}
|
||||
~HeapMapperInternal() = default;
|
||||
|
||||
template <typename Func>
|
||||
void ForEachInOverlapCounter(OverlapCounter& current_range, VAddr cpu_addr, u64 size,
|
||||
Func&& func) {
|
||||
const DAddr start_address = cpu_addr;
|
||||
const DAddr end_address = start_address + size;
|
||||
const IntervalType search_interval{start_address, end_address};
|
||||
auto it = current_range.lower_bound(search_interval);
|
||||
if (it == current_range.end()) {
|
||||
return;
|
||||
}
|
||||
auto end_it = current_range.upper_bound(search_interval);
|
||||
for (; it != end_it; it++) {
|
||||
auto& inter = it->first;
|
||||
DAddr inter_addr_end = inter.upper();
|
||||
DAddr inter_addr = inter.lower();
|
||||
if (inter_addr_end > end_address) {
|
||||
inter_addr_end = end_address;
|
||||
}
|
||||
if (inter_addr < start_address) {
|
||||
inter_addr = start_address;
|
||||
}
|
||||
func(inter_addr, inter_addr_end, it->second);
|
||||
}
|
||||
}
|
||||
|
||||
void RemoveEachInOverlapCounter(OverlapCounter& current_range,
|
||||
const IntervalType search_interval, int subtract_value) {
|
||||
bool any_removals = false;
|
||||
current_range.add(std::make_pair(search_interval, subtract_value));
|
||||
do {
|
||||
any_removals = false;
|
||||
auto it = current_range.lower_bound(search_interval);
|
||||
if (it == current_range.end()) {
|
||||
return;
|
||||
}
|
||||
auto end_it = current_range.upper_bound(search_interval);
|
||||
for (; it != end_it; it++) {
|
||||
if (it->second <= 0) {
|
||||
any_removals = true;
|
||||
current_range.erase(it);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} while (any_removals);
|
||||
}
|
||||
|
||||
IntervalSet base_set;
|
||||
OverlapCounter mapping_overlaps;
|
||||
Tegra::MaxwellDeviceMemoryManager& device_memory;
|
||||
std::mutex guard;
|
||||
Common::RangeSet<VAddr> m_temporary_set;
|
||||
Common::OverlapRangeSet<VAddr> m_mapped_ranges;
|
||||
Tegra::MaxwellDeviceMemoryManager& m_device_memory;
|
||||
std::mutex m_guard;
|
||||
};
|
||||
|
||||
HeapMapper::HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, Core::Asid asid,
|
||||
@ -116,60 +27,48 @@ HeapMapper::HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size,
|
||||
}
|
||||
|
||||
HeapMapper::~HeapMapper() {
|
||||
m_internal->device_memory.Unmap(m_daddress, m_size);
|
||||
// Unmap whatever has been mapped.
|
||||
m_internal->m_mapped_ranges.ForEach([this](VAddr start_addr, VAddr end_addr, s32 count) {
|
||||
const size_t sub_size = end_addr - start_addr;
|
||||
const size_t offset = start_addr - m_vaddress;
|
||||
m_internal->m_device_memory.Unmap(m_daddress + offset, sub_size);
|
||||
});
|
||||
}
|
||||
|
||||
DAddr HeapMapper::Map(VAddr start, size_t size) {
|
||||
std::scoped_lock lk(m_internal->guard);
|
||||
m_internal->base_set.clear();
|
||||
const IntervalType interval{start, start + size};
|
||||
m_internal->base_set.insert(interval);
|
||||
m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
|
||||
[this](VAddr start_addr, VAddr end_addr, int) {
|
||||
const IntervalType other{start_addr, end_addr};
|
||||
m_internal->base_set.subtract(other);
|
||||
});
|
||||
if (!m_internal->base_set.empty()) {
|
||||
auto it = m_internal->base_set.begin();
|
||||
auto end_it = m_internal->base_set.end();
|
||||
for (; it != end_it; it++) {
|
||||
const VAddr inter_addr_end = it->upper();
|
||||
const VAddr inter_addr = it->lower();
|
||||
const size_t offset = inter_addr - m_vaddress;
|
||||
const size_t sub_size = inter_addr_end - inter_addr;
|
||||
m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size,
|
||||
m_asid);
|
||||
}
|
||||
}
|
||||
m_internal->mapping_overlaps += std::make_pair(interval, 1);
|
||||
m_internal->base_set.clear();
|
||||
return m_daddress + (start - m_vaddress);
|
||||
std::scoped_lock lk(m_internal->m_guard);
|
||||
// Add the mapping range to a temporary range set.
|
||||
m_internal->m_temporary_set.Clear();
|
||||
m_internal->m_temporary_set.Add(start, size);
|
||||
|
||||
// Remove anything that's already mapped from the temporary range set.
|
||||
m_internal->m_mapped_ranges.ForEachInRange(
|
||||
start, size, [this](VAddr start_addr, VAddr end_addr, s32) {
|
||||
m_internal->m_temporary_set.Subtract(start_addr, end_addr - start_addr);
|
||||
});
|
||||
|
||||
// Map anything that has not been mapped yet.
|
||||
m_internal->m_temporary_set.ForEach([this](VAddr start_addr, VAddr end_addr) {
|
||||
const size_t sub_size = end_addr - start_addr;
|
||||
const size_t offset = start_addr - m_vaddress;
|
||||
m_internal->m_device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size, m_asid);
|
||||
});
|
||||
|
||||
// Add the mapping range to the split map, to register the map and overlaps.
|
||||
m_internal->m_mapped_ranges.Add(start, size);
|
||||
m_internal->m_temporary_set.Clear();
|
||||
return m_daddress + static_cast<DAddr>(start - m_vaddress);
|
||||
}
|
||||
|
||||
void HeapMapper::Unmap(VAddr start, size_t size) {
|
||||
std::scoped_lock lk(m_internal->guard);
|
||||
m_internal->base_set.clear();
|
||||
m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
|
||||
[this](VAddr start_addr, VAddr end_addr, int value) {
|
||||
if (value <= 1) {
|
||||
const IntervalType other{start_addr, end_addr};
|
||||
m_internal->base_set.insert(other);
|
||||
}
|
||||
});
|
||||
if (!m_internal->base_set.empty()) {
|
||||
auto it = m_internal->base_set.begin();
|
||||
auto end_it = m_internal->base_set.end();
|
||||
for (; it != end_it; it++) {
|
||||
const VAddr inter_addr_end = it->upper();
|
||||
const VAddr inter_addr = it->lower();
|
||||
const size_t offset = inter_addr - m_vaddress;
|
||||
const size_t sub_size = inter_addr_end - inter_addr;
|
||||
m_internal->device_memory.Unmap(m_daddress + offset, sub_size);
|
||||
}
|
||||
}
|
||||
const IntervalType to_remove{start, start + size};
|
||||
m_internal->RemoveEachInOverlapCounter(m_internal->mapping_overlaps, to_remove, -1);
|
||||
m_internal->base_set.clear();
|
||||
std::scoped_lock lk(m_internal->m_guard);
|
||||
|
||||
// Just subtract the range and whatever is deleted, unmap it.
|
||||
m_internal->m_mapped_ranges.Subtract(start, size, [this](VAddr start_addr, VAddr end_addr) {
|
||||
const size_t sub_size = end_addr - start_addr;
|
||||
const size_t offset = start_addr - m_vaddress;
|
||||
m_internal->m_device_memory.Unmap(m_daddress + offset, sub_size);
|
||||
});
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::NvCore
|
||||
|
@ -333,9 +333,13 @@ void NvMap::UnmapAllHandles(NvCore::SessionId session_id) {
    }();

    for (auto& [id, handle] : handles_copy) {
        if (handle->session_id.id == session_id.id) {
            FreeHandle(id, false);
        {
            std::scoped_lock lk{handle->mutex};
            if (handle->session_id.id != session_id.id || handle->dupes <= 0) {
                continue;
            }
        }
        FreeHandle(id, false);
    }
}
|
||||
|
||||
|
@ -15,6 +15,22 @@
|
||||
|
||||
namespace Service::Nvidia::Devices {

namespace {

Tegra::BlendMode ConvertBlending(Service::Nvnflinger::LayerBlending blending) {
    switch (blending) {
    case Service::Nvnflinger::LayerBlending::None:
    default:
        return Tegra::BlendMode::Opaque;
    case Service::Nvnflinger::LayerBlending::Premultiplied:
        return Tegra::BlendMode::Premultiplied;
    case Service::Nvnflinger::LayerBlending::Coverage:
        return Tegra::BlendMode::Coverage;
    }
}

} // namespace

nvdisp_disp0::nvdisp_disp0(Core::System& system_, NvCore::Container& core)
    : nvdevice{system_}, container{core}, nvmap{core.GetNvMapFile()} {}
nvdisp_disp0::~nvdisp_disp0() = default;
@ -56,6 +72,7 @@ void nvdisp_disp0::Composite(std::span<const Nvnflinger::HwcLayer> sorted_layers
            .pixel_format = layer.format,
            .transform_flags = layer.transform,
            .crop_rect = layer.crop_rect,
            .blending = ConvertBlending(layer.blending),
        });

        for (size_t i = 0; i < layer.acquire_fence.num_fences; i++) {
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include "core/hle/service/nvdrv/core/container.h"
|
||||
#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
|
||||
#include "video_core/host1x/host1x.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
@ -21,13 +22,8 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
|
||||
switch (command.group) {
|
||||
case 0x0:
|
||||
switch (command.cmd) {
|
||||
case 0x1: {
|
||||
auto& host1x_file = core.Host1xDeviceFile();
|
||||
if (!host1x_file.fd_to_id.contains(fd)) {
|
||||
host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++;
|
||||
}
|
||||
case 0x1:
|
||||
return WrapFixedVariable(this, &nvhost_nvdec::Submit, input, output, fd);
|
||||
}
|
||||
case 0x2:
|
||||
return WrapFixed(this, &nvhost_nvdec::GetSyncpoint, input, output);
|
||||
case 0x3:
|
||||
@ -72,15 +68,12 @@ void nvhost_nvdec::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {
|
||||
LOG_INFO(Service_NVDRV, "NVDEC video stream started");
|
||||
system.SetNVDECActive(true);
|
||||
sessions[fd] = session_id;
|
||||
host1x.StartDevice(fd, Tegra::Host1x::ChannelType::NvDec, channel_syncpoint);
|
||||
}
|
||||
|
||||
void nvhost_nvdec::OnClose(DeviceFD fd) {
|
||||
LOG_INFO(Service_NVDRV, "NVDEC video stream ended");
|
||||
auto& host1x_file = core.Host1xDeviceFile();
|
||||
const auto iter = host1x_file.fd_to_id.find(fd);
|
||||
if (iter != host1x_file.fd_to_id.end()) {
|
||||
system.GPU().ClearCdmaInstance(iter->second);
|
||||
}
|
||||
host1x.StopDevice(fd, Tegra::Host1x::ChannelType::NvDec);
|
||||
system.SetNVDECActive(false);
|
||||
auto it = sessions.find(fd);
|
||||
if (it != sessions.end()) {
|
||||
|
@ -55,8 +55,9 @@ std::size_t WriteVectors(std::span<u8> dst, const std::vector<T>& src, std::size
|
||||
|
||||
nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_,
|
||||
NvCore::ChannelType channel_type_)
|
||||
: nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()},
|
||||
nvmap{core.GetNvMapFile()}, channel_type{channel_type_} {
|
||||
: nvdevice{system_}, host1x{system_.Host1x()}, core{core_},
|
||||
syncpoint_manager{core.GetSyncpointManager()}, nvmap{core.GetNvMapFile()},
|
||||
channel_type{channel_type_} {
|
||||
auto& syncpts_accumulated = core.Host1xDeviceFile().syncpts_accumulated;
|
||||
if (syncpts_accumulated.empty()) {
|
||||
channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
|
||||
@ -95,24 +96,24 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, De
|
||||
offset += SliceVectors(data, syncpt_increments, params.syncpoint_count, offset);
|
||||
offset += SliceVectors(data, fence_thresholds, params.fence_count, offset);
|
||||
|
||||
auto& gpu = system.GPU();
|
||||
auto* session = core.GetSession(sessions[fd]);
|
||||
|
||||
if (gpu.UseNvdec()) {
|
||||
for (std::size_t i = 0; i < syncpt_increments.size(); i++) {
|
||||
const SyncptIncr& syncpt_incr = syncpt_increments[i];
|
||||
fence_thresholds[i] =
|
||||
syncpoint_manager.IncrementSyncpointMaxExt(syncpt_incr.id, syncpt_incr.increments);
|
||||
}
|
||||
for (std::size_t i = 0; i < syncpt_increments.size(); i++) {
|
||||
const SyncptIncr& syncpt_incr = syncpt_increments[i];
|
||||
fence_thresholds[i] =
|
||||
syncpoint_manager.IncrementSyncpointMaxExt(syncpt_incr.id, syncpt_incr.increments);
|
||||
}
|
||||
|
||||
for (const auto& cmd_buffer : command_buffers) {
|
||||
const auto object = nvmap.GetHandle(cmd_buffer.memory_id);
|
||||
ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
|
||||
Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
|
||||
session->process->GetMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
|
||||
cmdlist.size() * sizeof(u32));
|
||||
gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
|
||||
Core::Memory::CpuGuestMemory<Tegra::ChCommandHeader,
|
||||
Core::Memory::GuestMemoryFlags::SafeRead>
|
||||
cmdlist(session->process->GetMemory(), object->address + cmd_buffer.offset,
|
||||
cmd_buffer.word_count);
|
||||
host1x.PushEntries(fd, std::move(cmdlist));
|
||||
}
|
||||
|
||||
// Some games expect command_buffers to be written back
|
||||
offset = 0;
|
||||
offset += WriteVectors(data, command_buffers, offset);
|
||||
|
@ -119,6 +119,7 @@ protected:
|
||||
|
||||
Kernel::KEvent* QueryEvent(u32 event_id) override;
|
||||
|
||||
Tegra::Host1x::Host1x& host1x;
|
||||
u32 channel_syncpoint;
|
||||
s32_le nvmap_fd{};
|
||||
u32_le submit_timeout{};
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include "core/hle/service/nvdrv/core/container.h"
|
||||
#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvhost_vic.h"
|
||||
#include "video_core/host1x/host1x.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
|
||||
namespace Service::Nvidia::Devices {
|
||||
@ -21,13 +22,8 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
|
||||
switch (command.group) {
|
||||
case 0x0:
|
||||
switch (command.cmd) {
|
||||
case 0x1: {
|
||||
auto& host1x_file = core.Host1xDeviceFile();
|
||||
if (!host1x_file.fd_to_id.contains(fd)) {
|
||||
host1x_file.fd_to_id[fd] = host1x_file.vic_next_id++;
|
||||
}
|
||||
case 0x1:
|
||||
return WrapFixedVariable(this, &nvhost_vic::Submit, input, output, fd);
|
||||
}
|
||||
case 0x2:
|
||||
return WrapFixed(this, &nvhost_vic::GetSyncpoint, input, output);
|
||||
case 0x3:
|
||||
@ -70,14 +66,11 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
|
||||
|
||||
void nvhost_vic::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {
|
||||
sessions[fd] = session_id;
|
||||
host1x.StartDevice(fd, Tegra::Host1x::ChannelType::VIC, channel_syncpoint);
|
||||
}
|
||||
|
||||
void nvhost_vic::OnClose(DeviceFD fd) {
|
||||
auto& host1x_file = core.Host1xDeviceFile();
|
||||
const auto iter = host1x_file.fd_to_id.find(fd);
|
||||
if (iter != host1x_file.fd_to_id.end()) {
|
||||
system.GPU().ClearCdmaInstance(iter->second);
|
||||
}
|
||||
host1x.StopDevice(fd, Tegra::Host1x::ChannelType::VIC);
|
||||
sessions.erase(fd);
|
||||
}
|
||||
|
||||
|
@ -14,24 +14,20 @@
|
||||
#include "core/hle/service/nvnflinger/ui/graphic_buffer.h"
|
||||
#include "core/hle/service/vi/layer/vi_layer.h"
|
||||
#include "core/hle/service/vi/vi_results.h"
|
||||
#include "video_core/gpu.h"
|
||||
#include "video_core/host1x/host1x.h"
|
||||
|
||||
namespace Service::Nvnflinger {
|
||||
|
||||
namespace {
|
||||
|
||||
Result AllocateIoForProcessAddressSpace(Common::ProcessAddress* out_map_address,
|
||||
std::unique_ptr<Kernel::KPageGroup>* out_page_group,
|
||||
Core::System& system, u32 size) {
|
||||
Result AllocateSharedBufferMemory(std::unique_ptr<Kernel::KPageGroup>* out_page_group,
|
||||
Core::System& system, u32 size) {
|
||||
using Core::Memory::YUZU_PAGESIZE;
|
||||
|
||||
// Allocate memory for the system shared buffer.
|
||||
// FIXME: Because the gmmu can only point to cpu addresses, we need
|
||||
// to map this in the application space to allow it to be used.
|
||||
// FIXME: Add proper smmu emulation.
|
||||
// FIXME: This memory belongs to vi's .data section.
|
||||
auto& kernel = system.Kernel();
|
||||
auto* process = system.ApplicationProcess();
|
||||
auto& page_table = process->GetPageTable();
|
||||
|
||||
// Hold a temporary page group reference while we try to map it.
|
||||
auto pg = std::make_unique<Kernel::KPageGroup>(
|
||||
@ -43,6 +39,30 @@ Result AllocateIoForProcessAddressSpace(Common::ProcessAddress* out_map_address,
|
||||
Kernel::KMemoryManager::EncodeOption(Kernel::KMemoryManager::Pool::Secure,
|
||||
Kernel::KMemoryManager::Direction::FromBack)));
|
||||
|
||||
// Fill the output data with red.
|
||||
for (auto& block : *pg) {
|
||||
u32* start = system.DeviceMemory().GetPointer<u32>(block.GetAddress());
|
||||
u32* end = system.DeviceMemory().GetPointer<u32>(block.GetAddress() + block.GetSize());
|
||||
|
||||
for (; start < end; start++) {
|
||||
*start = 0xFF0000FF;
|
||||
}
|
||||
}
|
||||
|
||||
// Return the mapped page group.
|
||||
*out_page_group = std::move(pg);
|
||||
|
||||
// We succeeded.
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result MapSharedBufferIntoProcessAddressSpace(Common::ProcessAddress* out_map_address,
|
||||
std::unique_ptr<Kernel::KPageGroup>& pg,
|
||||
Kernel::KProcess* process, Core::System& system) {
|
||||
using Core::Memory::YUZU_PAGESIZE;
|
||||
|
||||
auto& page_table = process->GetPageTable();
|
||||
|
||||
// Get bounds of where mapping is possible.
|
||||
const VAddr alias_code_begin = GetInteger(page_table.GetAliasCodeRegionStart());
|
||||
const VAddr alias_code_size = page_table.GetAliasCodeRegionSize() / YUZU_PAGESIZE;
|
||||
@ -64,9 +84,6 @@ Result AllocateIoForProcessAddressSpace(Common::ProcessAddress* out_map_address,
|
||||
// Return failure, if necessary
|
||||
R_UNLESS(i < 64, res);
|
||||
|
||||
// Return the mapped page group.
|
||||
*out_page_group = std::move(pg);
|
||||
|
||||
// We succeeded.
|
||||
R_SUCCEED();
|
||||
}
|
||||
@ -135,6 +152,13 @@ Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv, Nvidia::D
|
||||
R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size, nvmap_fd));
|
||||
}
|
||||
|
||||
void FreeHandle(u32 handle, Nvidia::Module& nvdrv, Nvidia::DeviceFD nvmap_fd) {
|
||||
auto nvmap = nvdrv.GetDevice<Nvidia::Devices::nvmap>(nvmap_fd);
|
||||
ASSERT(nvmap != nullptr);
|
||||
|
||||
R_ASSERT(FreeNvMapHandle(*nvmap, handle, nvmap_fd));
|
||||
}
|
||||
|
||||
constexpr auto SharedBufferBlockLinearFormat = android::PixelFormat::Rgba8888;
|
||||
constexpr u32 SharedBufferBlockLinearBpp = 4;
|
||||
|
||||
@ -186,53 +210,97 @@ FbShareBufferManager::FbShareBufferManager(Core::System& system, Nvnflinger& fli
|
||||
|
||||
FbShareBufferManager::~FbShareBufferManager() = default;
|
||||
|
||||
Result FbShareBufferManager::Initialize(u64* out_buffer_id, u64* out_layer_id, u64 display_id) {
|
||||
Result FbShareBufferManager::Initialize(Kernel::KProcess* owner_process, u64* out_buffer_id,
|
||||
u64* out_layer_handle, u64 display_id,
|
||||
LayerBlending blending) {
|
||||
std::scoped_lock lk{m_guard};
|
||||
|
||||
// Ensure we have not already created a buffer.
|
||||
R_UNLESS(m_buffer_id == 0, VI::ResultOperationFailed);
|
||||
// Ensure we haven't already created.
|
||||
const u64 aruid = owner_process->GetProcessId();
|
||||
R_UNLESS(!m_sessions.contains(aruid), VI::ResultPermissionDenied);
|
||||
|
||||
// Allocate memory and space for the shared buffer.
|
||||
Common::ProcessAddress map_address;
|
||||
R_TRY(AllocateIoForProcessAddressSpace(std::addressof(map_address),
|
||||
std::addressof(m_buffer_page_group), m_system,
|
||||
SharedBufferSize));
|
||||
// Allocate memory for the shared buffer if needed.
|
||||
if (!m_buffer_page_group) {
|
||||
R_TRY(AllocateSharedBufferMemory(std::addressof(m_buffer_page_group), m_system,
|
||||
SharedBufferSize));
|
||||
|
||||
// Record buffer id.
|
||||
m_buffer_id = m_next_buffer_id++;
|
||||
|
||||
// Record display id.
|
||||
m_display_id = display_id;
|
||||
}
|
||||
|
||||
// Map into process.
|
||||
Common::ProcessAddress map_address{};
|
||||
R_TRY(MapSharedBufferIntoProcessAddressSpace(std::addressof(map_address), m_buffer_page_group,
|
||||
owner_process, m_system));
|
||||
|
||||
// Create new session.
|
||||
auto [it, was_emplaced] = m_sessions.emplace(aruid, FbShareSession{});
|
||||
auto& session = it->second;
|
||||
|
||||
auto& container = m_nvdrv->GetContainer();
|
||||
m_session_id = container.OpenSession(m_system.ApplicationProcess());
|
||||
m_nvmap_fd = m_nvdrv->Open("/dev/nvmap", m_session_id);
|
||||
session.session_id = container.OpenSession(owner_process);
|
||||
session.nvmap_fd = m_nvdrv->Open("/dev/nvmap", session.session_id);
|
||||
|
||||
// Create an nvmap handle for the buffer and assign the memory to it.
|
||||
R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd,
|
||||
map_address, SharedBufferSize));
|
||||
|
||||
// Record the display id.
|
||||
m_display_id = display_id;
|
||||
R_TRY(AllocateHandleForBuffer(std::addressof(session.buffer_nvmap_handle), *m_nvdrv,
|
||||
session.nvmap_fd, map_address, SharedBufferSize));
|
||||
|
||||
// Create and open a layer for the display.
|
||||
m_layer_id = m_flinger.CreateLayer(m_display_id).value();
|
||||
m_flinger.OpenLayer(m_layer_id);
|
||||
|
||||
// Set up the buffer.
|
||||
m_buffer_id = m_next_buffer_id++;
|
||||
session.layer_id = m_flinger.CreateLayer(m_display_id, blending).value();
|
||||
m_flinger.OpenLayer(session.layer_id);
|
||||
|
||||
// Get the layer.
|
||||
VI::Layer* layer = m_flinger.FindLayer(m_display_id, m_layer_id);
|
||||
VI::Layer* layer = m_flinger.FindLayer(m_display_id, session.layer_id);
|
||||
ASSERT(layer != nullptr);
|
||||
|
||||
// Get the producer and set preallocated buffers.
|
||||
auto& producer = layer->GetBufferQueue();
|
||||
MakeGraphicBuffer(producer, 0, m_buffer_nvmap_handle);
|
||||
MakeGraphicBuffer(producer, 1, m_buffer_nvmap_handle);
|
||||
MakeGraphicBuffer(producer, 0, session.buffer_nvmap_handle);
|
||||
MakeGraphicBuffer(producer, 1, session.buffer_nvmap_handle);
|
||||
|
||||
// Assign outputs.
|
||||
*out_buffer_id = m_buffer_id;
|
||||
*out_layer_id = m_layer_id;
|
||||
*out_layer_handle = session.layer_id;
|
||||
|
||||
// We succeeded.
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void FbShareBufferManager::Finalize(Kernel::KProcess* owner_process) {
|
||||
std::scoped_lock lk{m_guard};
|
||||
|
||||
if (m_buffer_id == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const u64 aruid = owner_process->GetProcessId();
|
||||
const auto it = m_sessions.find(aruid);
|
||||
if (it == m_sessions.end()) {
|
||||
return;
|
||||
}
|
||||
|
||||
auto& session = it->second;
|
||||
|
||||
// Destroy the layer.
|
||||
m_flinger.DestroyLayer(session.layer_id);
|
||||
|
||||
// Close nvmap handle.
|
||||
FreeHandle(session.buffer_nvmap_handle, *m_nvdrv, session.nvmap_fd);
|
||||
|
||||
// Close nvmap device.
|
||||
m_nvdrv->Close(session.nvmap_fd);
|
||||
|
||||
// Close session.
|
||||
auto& container = m_nvdrv->GetContainer();
|
||||
container.CloseSession(session.session_id);
|
||||
|
||||
// Erase.
|
||||
m_sessions.erase(it);
|
||||
}
|
||||
|
||||
Result FbShareBufferManager::GetSharedBufferMemoryHandleId(u64* out_buffer_size,
|
||||
s32* out_nvmap_handle,
|
||||
SharedMemoryPoolLayout* out_pool_layout,
|
||||
@ -242,17 +310,18 @@ Result FbShareBufferManager::GetSharedBufferMemoryHandleId(u64* out_buffer_size,
|
||||
|
||||
R_UNLESS(m_buffer_id > 0, VI::ResultNotFound);
|
||||
R_UNLESS(buffer_id == m_buffer_id, VI::ResultNotFound);
|
||||
R_UNLESS(m_sessions.contains(applet_resource_user_id), VI::ResultNotFound);
|
||||
|
||||
*out_pool_layout = SharedBufferPoolLayout;
|
||||
*out_buffer_size = SharedBufferSize;
|
||||
*out_nvmap_handle = m_buffer_nvmap_handle;
|
||||
*out_nvmap_handle = m_sessions[applet_resource_user_id].buffer_nvmap_handle;
|
||||
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result FbShareBufferManager::GetLayerFromId(VI::Layer** out_layer, u64 layer_id) {
|
||||
// Ensure the layer id is valid.
|
||||
R_UNLESS(m_layer_id > 0 && layer_id == m_layer_id, VI::ResultNotFound);
|
||||
R_UNLESS(layer_id > 0, VI::ResultNotFound);
|
||||
|
||||
// Get the layer.
|
||||
VI::Layer* layer = m_flinger.FindLayer(m_display_id, layer_id);
|
||||
@ -309,6 +378,10 @@ Result FbShareBufferManager::PresentSharedFrameBuffer(android::Fence fence,
|
||||
android::Status::NoError,
|
||||
VI::ResultOperationFailed);
|
||||
|
||||
ON_RESULT_FAILURE {
|
||||
producer.CancelBuffer(static_cast<s32>(slot), fence);
|
||||
};
|
||||
|
||||
// Queue the buffer to the producer.
|
||||
android::QueueBufferInput input{};
|
||||
android::QueueBufferOutput output{};
|
||||
@ -342,4 +415,33 @@ Result FbShareBufferManager::GetSharedFrameBufferAcquirableEvent(Kernel::KReadab
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result FbShareBufferManager::WriteAppletCaptureBuffer(bool* out_was_written, s32* out_layer_index) {
|
||||
std::vector<u8> capture_buffer(m_system.GPU().GetAppletCaptureBuffer());
|
||||
Common::ScratchBuffer<u32> scratch;
|
||||
|
||||
// TODO: this could be optimized
|
||||
s64 e = -1280 * 768 * 4;
|
||||
for (auto& block : *m_buffer_page_group) {
|
||||
u8* start = m_system.DeviceMemory().GetPointer<u8>(block.GetAddress());
|
||||
u8* end = m_system.DeviceMemory().GetPointer<u8>(block.GetAddress() + block.GetSize());
|
||||
|
||||
for (; start < end; start++) {
|
||||
*start = 0;
|
||||
|
||||
if (e >= 0 && e < static_cast<s64>(capture_buffer.size())) {
|
||||
*start = capture_buffer[e];
|
||||
}
|
||||
e++;
|
||||
}
|
||||
|
||||
m_system.GPU().Host1x().MemoryManager().ApplyOpOnPointer(start, scratch, [&](DAddr addr) {
|
||||
m_system.GPU().InvalidateRegion(addr, end - start);
|
||||
});
|
||||
}
|
||||
|
||||
*out_was_written = true;
|
||||
*out_layer_index = 1;
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
} // namespace Service::Nvnflinger
|
||||
|
@ -3,9 +3,12 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <map>
|
||||
|
||||
#include "common/math_util.h"
|
||||
#include "core/hle/service/nvdrv/core/container.h"
|
||||
#include "core/hle/service/nvdrv/nvdata.h"
|
||||
#include "core/hle/service/nvnflinger/hwc_layer.h"
|
||||
#include "core/hle/service/nvnflinger/nvnflinger.h"
|
||||
#include "core/hle/service/nvnflinger/ui/fence.h"
|
||||
|
||||
@ -29,13 +32,18 @@ struct SharedMemoryPoolLayout {
|
||||
};
|
||||
static_assert(sizeof(SharedMemoryPoolLayout) == 0x188, "SharedMemoryPoolLayout has wrong size");
|
||||
|
||||
struct FbShareSession;
|
||||
|
||||
class FbShareBufferManager final {
|
||||
public:
|
||||
explicit FbShareBufferManager(Core::System& system, Nvnflinger& flinger,
|
||||
std::shared_ptr<Nvidia::Module> nvdrv);
|
||||
~FbShareBufferManager();
|
||||
|
||||
Result Initialize(u64* out_buffer_id, u64* out_layer_handle, u64 display_id);
|
||||
Result Initialize(Kernel::KProcess* owner_process, u64* out_buffer_id, u64* out_layer_handle,
|
||||
u64 display_id, LayerBlending blending);
|
||||
void Finalize(Kernel::KProcess* owner_process);
|
||||
|
||||
Result GetSharedBufferMemoryHandleId(u64* out_buffer_size, s32* out_nvmap_handle,
|
||||
SharedMemoryPoolLayout* out_pool_layout, u64 buffer_id,
|
||||
u64 applet_resource_user_id);
|
||||
@ -45,6 +53,8 @@ public:
|
||||
u32 transform, s32 swap_interval, u64 layer_id, s64 slot);
|
||||
Result GetSharedFrameBufferAcquirableEvent(Kernel::KReadableEvent** out_event, u64 layer_id);
|
||||
|
||||
Result WriteAppletCaptureBuffer(bool* out_was_written, s32* out_layer_index);
|
||||
|
||||
private:
|
||||
Result GetLayerFromId(VI::Layer** out_layer, u64 layer_id);
|
||||
|
||||
@ -52,11 +62,8 @@ private:
|
||||
u64 m_next_buffer_id = 1;
|
||||
u64 m_display_id = 0;
|
||||
u64 m_buffer_id = 0;
|
||||
u64 m_layer_id = 0;
|
||||
u32 m_buffer_nvmap_handle = 0;
|
||||
SharedMemoryPoolLayout m_pool_layout = {};
|
||||
Nvidia::DeviceFD m_nvmap_fd = {};
|
||||
Nvidia::NvCore::SessionId m_session_id = {};
|
||||
std::map<u64, FbShareSession> m_sessions;
|
||||
std::unique_ptr<Kernel::KPageGroup> m_buffer_page_group;
|
||||
|
||||
std::mutex m_guard;
|
||||
@ -65,4 +72,11 @@ private:
|
||||
std::shared_ptr<Nvidia::Module> m_nvdrv;
|
||||
};
|
||||
|
||||
struct FbShareSession {
|
||||
Nvidia::DeviceFD nvmap_fd = {};
|
||||
Nvidia::NvCore::SessionId session_id = {};
|
||||
u64 layer_id = {};
|
||||
u32 buffer_nvmap_handle = 0;
|
||||
};
|
||||
|
||||
} // namespace Service::Nvnflinger
|
||||
|
@ -7,7 +7,6 @@
|
||||
#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
|
||||
#include "core/hle/service/nvnflinger/buffer_item.h"
|
||||
#include "core/hle/service/nvnflinger/buffer_item_consumer.h"
|
||||
#include "core/hle/service/nvnflinger/buffer_queue_producer.h"
|
||||
#include "core/hle/service/nvnflinger/hardware_composer.h"
|
||||
#include "core/hle/service/nvnflinger/hwc_layer.h"
|
||||
#include "core/hle/service/nvnflinger/ui/graphic_buffer.h"
|
||||
@ -46,31 +45,9 @@ HardwareComposer::HardwareComposer() = default;
|
||||
HardwareComposer::~HardwareComposer() = default;
|
||||
|
||||
u32 HardwareComposer::ComposeLocked(f32* out_speed_scale, VI::Display& display,
|
||||
Nvidia::Devices::nvdisp_disp0& nvdisp, u32 frame_advance) {
|
||||
Nvidia::Devices::nvdisp_disp0& nvdisp) {
|
||||
boost::container::small_vector<HwcLayer, 2> composition_stack;
|
||||
|
||||
m_frame_number += frame_advance;
|
||||
|
||||
// Release any necessary framebuffers.
|
||||
for (auto& [layer_id, framebuffer] : m_framebuffers) {
|
||||
if (framebuffer.release_frame_number > m_frame_number) {
|
||||
// Not yet ready to release this framebuffer.
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!framebuffer.is_acquired) {
|
||||
// Already released.
|
||||
continue;
|
||||
}
|
||||
|
||||
if (auto* layer = display.FindLayer(layer_id); layer != nullptr) {
|
||||
// TODO: support release fence
|
||||
// This is needed to prevent screen tearing
|
||||
layer->GetConsumer().ReleaseBuffer(framebuffer.item, android::Fence::NoFence());
|
||||
framebuffer.is_acquired = false;
|
||||
}
|
||||
}
|
||||
|
||||
// Set default speed limit to 100%.
|
||||
*out_speed_scale = 1.0f;
|
||||
|
||||
@ -109,6 +86,7 @@ u32 HardwareComposer::ComposeLocked(f32* out_speed_scale, VI::Display& display,
|
||||
.height = igbp_buffer.Height(),
|
||||
.stride = igbp_buffer.Stride(),
|
||||
.z_index = 0,
|
||||
.blending = layer.GetBlending(),
|
||||
.transform = static_cast<android::BufferTransformFlags>(item.transform),
|
||||
.crop_rect = item.crop,
|
||||
.acquire_fence = item.fence,
|
||||
@ -142,7 +120,30 @@ u32 HardwareComposer::ComposeLocked(f32* out_speed_scale, VI::Display& display,
|
||||
MicroProfileFlip();
|
||||
|
||||
// Advance by at least one frame.
|
||||
return swap_interval.value_or(1);
|
||||
const u32 frame_advance = swap_interval.value_or(1);
|
||||
m_frame_number += frame_advance;
|
||||
|
||||
// Release any necessary framebuffers.
|
||||
for (auto& [layer_id, framebuffer] : m_framebuffers) {
|
||||
if (framebuffer.release_frame_number > m_frame_number) {
|
||||
// Not yet ready to release this framebuffer.
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!framebuffer.is_acquired) {
|
||||
// Already released.
|
||||
continue;
|
||||
}
|
||||
|
||||
if (auto* layer = display.FindLayer(layer_id); layer != nullptr) {
|
||||
// TODO: support release fence
|
||||
// This is needed to prevent screen tearing
|
||||
layer->GetConsumer().ReleaseBuffer(framebuffer.item, android::Fence::NoFence());
|
||||
framebuffer.is_acquired = false;
|
||||
}
|
||||
}
|
||||
|
||||
return frame_advance;
|
||||
}
|
||||
|
||||
void HardwareComposer::RemoveLayerLocked(VI::Display& display, LayerId layer_id) {
|
||||
|
@ -27,7 +27,7 @@ public:
|
||||
    ~HardwareComposer();

    u32 ComposeLocked(f32* out_speed_scale, VI::Display& display,
                      Nvidia::Devices::nvdisp_disp0& nvdisp, u32 frame_advance);
                      Nvidia::Devices::nvdisp_disp0& nvdisp);
    void RemoveLayerLocked(VI::Display& display, LayerId layer_id);

private:
|
||||
|
@ -11,6 +11,18 @@
|
||||
|
||||
namespace Service::Nvnflinger {

// hwc_layer_t::blending values
enum class LayerBlending : u32 {
    // No blending
    None = 0x100,

    // ONE / ONE_MINUS_SRC_ALPHA
    Premultiplied = 0x105,

    // SRC_ALPHA / ONE_MINUS_SRC_ALPHA
    Coverage = 0x405,
};
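
The factor comments above pin down what each mode means for compositing. Purely as an illustration (the helper below is hypothetical and not part of this commit), the mapping could be spelled out as:

// Hypothetical helper, not part of the diff: spells out the (source, destination)
// blend factors implied by each LayerBlending value documented above.
struct BlendFactors {
    const char* src;
    const char* dst;
};

constexpr BlendFactors ToBlendFactors(LayerBlending blending) {
    switch (blending) {
    case LayerBlending::Premultiplied:
        return {"ONE", "ONE_MINUS_SRC_ALPHA"};
    case LayerBlending::Coverage:
        return {"SRC_ALPHA", "ONE_MINUS_SRC_ALPHA"};
    case LayerBlending::None:
    default:
        return {"ONE", "ZERO"}; // assumed: no blending, the source replaces the destination
    }
}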
|
||||
|
||||
struct HwcLayer {
    u32 buffer_handle;
    u32 offset;
@ -19,6 +31,7 @@ struct HwcLayer {
    u32 height;
    u32 stride;
    s32 z_index;
    LayerBlending blending;
    android::BufferTransformFlags transform;
    Common::Rectangle<int> crop_rect;
    android::Fence acquire_fence;
|
||||
|
@ -157,7 +157,7 @@ bool Nvnflinger::CloseDisplay(u64 display_id) {
|
||||
    return true;
}

std::optional<u64> Nvnflinger::CreateLayer(u64 display_id) {
std::optional<u64> Nvnflinger::CreateLayer(u64 display_id, LayerBlending blending) {
    const auto lock_guard = Lock();
    auto* const display = FindDisplay(display_id);

@ -166,13 +166,14 @@ std::optional<u64> Nvnflinger::CreateLayer(u64 display_id) {
    }

    const u64 layer_id = next_layer_id++;
    CreateLayerAtId(*display, layer_id);
    CreateLayerAtId(*display, layer_id, blending);
    return layer_id;
}

void Nvnflinger::CreateLayerAtId(VI::Display& display, u64 layer_id) {
void Nvnflinger::CreateLayerAtId(VI::Display& display, u64 layer_id, LayerBlending blending) {
    const auto buffer_id = next_buffer_queue_id++;
    display.CreateLayer(layer_id, buffer_id, nvdrv->container);
    display.FindLayer(layer_id)->SetBlending(blending);
}

bool Nvnflinger::OpenLayer(u64 layer_id) {
@ -291,8 +292,7 @@ void Nvnflinger::Compose() {
        auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>(disp_fd);
        ASSERT(nvdisp);

        swap_interval = display.GetComposer().ComposeLocked(&compose_speed_scale, display, *nvdisp,
                                                            swap_interval);
        swap_interval = display.GetComposer().ComposeLocked(&compose_speed_scale, display, *nvdisp);
    }
}
|
||||
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include "common/thread.h"
|
||||
#include "core/hle/result.h"
|
||||
#include "core/hle/service/kernel_helpers.h"
|
||||
#include "core/hle/service/nvnflinger/hwc_layer.h"
|
||||
|
||||
namespace Common {
|
||||
class Event;
|
||||
@ -72,7 +73,8 @@ public:
|
||||
/// Creates a layer on the specified display and returns the layer ID.
|
||||
///
|
||||
/// If an invalid display ID is specified, then an empty optional is returned.
|
||||
[[nodiscard]] std::optional<u64> CreateLayer(u64 display_id);
|
||||
[[nodiscard]] std::optional<u64> CreateLayer(u64 display_id,
|
||||
LayerBlending blending = LayerBlending::None);
|
||||
|
||||
/// Opens a layer on all displays for the given layer ID.
|
||||
bool OpenLayer(u64 layer_id);
|
||||
@ -128,7 +130,7 @@ private:
|
||||
[[nodiscard]] VI::Layer* FindLayer(u64 display_id, u64 layer_id);
|
||||
|
||||
/// Creates a layer with the specified layer ID in the desired display.
|
||||
void CreateLayerAtId(VI::Display& display, u64 layer_id);
|
||||
void CreateLayerAtId(VI::Display& display, u64 layer_id, LayerBlending blending);
|
||||
|
||||
void SplitVSync(std::stop_token stop_token);
|
||||
|
||||
|
@ -1,6 +1,7 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "core/hle/service/nvnflinger/hwc_layer.h"
#include "core/hle/service/vi/layer/vi_layer.h"

namespace Service::VI {
@ -8,8 +9,9 @@ namespace Service::VI {
Layer::Layer(u64 layer_id_, u32 binder_id_, android::BufferQueueCore& core_,
             android::BufferQueueProducer& binder_,
             std::shared_ptr<android::BufferItemConsumer>&& consumer_)
    : layer_id{layer_id_}, binder_id{binder_id_}, core{core_}, binder{binder_},
      consumer{std::move(consumer_)}, open{false}, visible{true} {}
    : layer_id{layer_id_}, binder_id{binder_id_}, core{core_}, binder{binder_}, consumer{std::move(
                                                                                    consumer_)},
      blending{Nvnflinger::LayerBlending::None}, open{false}, visible{true} {}

Layer::~Layer() = default;
|
||||
|
||||
|
@ -14,6 +14,10 @@ class BufferQueueCore;
|
||||
class BufferQueueProducer;
|
||||
} // namespace Service::android
|
||||
|
||||
namespace Service::Nvnflinger {
|
||||
enum class LayerBlending : u32;
|
||||
}
|
||||
|
||||
namespace Service::VI {
|
||||
|
||||
/// Represents a single display layer.
|
||||
@ -92,12 +96,21 @@ public:
|
||||
return !std::exchange(open, true);
|
||||
}
|
||||
|
||||
Nvnflinger::LayerBlending GetBlending() {
|
||||
return blending;
|
||||
}
|
||||
|
||||
void SetBlending(Nvnflinger::LayerBlending b) {
|
||||
blending = b;
|
||||
}
|
||||
|
||||
private:
|
||||
const u64 layer_id;
|
||||
const u32 binder_id;
|
||||
android::BufferQueueCore& core;
|
||||
android::BufferQueueProducer& binder;
|
||||
std::shared_ptr<android::BufferItemConsumer> consumer;
|
||||
Service::Nvnflinger::LayerBlending blending;
|
||||
bool open;
|
||||
bool visible;
|
||||
};
|
||||
|
@ -64,6 +64,8 @@ public:
|
||||
Memory(Memory&&) = default;
|
||||
Memory& operator=(Memory&&) = delete;
|
||||
|
||||
static constexpr bool HAS_FLUSH_INVALIDATION = false;
|
||||
|
||||
/**
|
||||
* Resets the state of the Memory system.
|
||||
*/
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include "core/core_timing.h"
|
||||
#include "core/hle/kernel/k_page_table.h"
|
||||
#include "core/hle/kernel/k_process.h"
|
||||
#include "core/hle/kernel/k_process_page_table.h"
|
||||
#include "core/hle/service/hid/hid_server.h"
|
||||
#include "core/hle/service/sm/sm.h"
|
||||
#include "core/memory.h"
|
||||
@ -85,8 +86,12 @@ VAddr StandardVmCallbacks::SanitizeAddress(VAddr in) const {
|
||||
if ((in < metadata.main_nso_extents.base ||
|
||||
in >= metadata.main_nso_extents.base + metadata.main_nso_extents.size) &&
|
||||
(in < metadata.heap_extents.base ||
|
||||
in >= metadata.heap_extents.base + metadata.heap_extents.size)) {
|
||||
LOG_ERROR(CheatEngine,
|
||||
in >= metadata.heap_extents.base + metadata.heap_extents.size) &&
|
||||
(in < metadata.alias_extents.base ||
|
||||
in >= metadata.heap_extents.base + metadata.alias_extents.size) &&
|
||||
(in < metadata.aslr_extents.base ||
|
||||
in >= metadata.heap_extents.base + metadata.aslr_extents.size)) {
|
||||
LOG_DEBUG(CheatEngine,
|
||||
"Cheat attempting to access memory at invalid address={:016X}, if this "
|
||||
"persists, "
|
||||
"the cheat may be incorrect. However, this may be normal early in execution if "
|
||||
@ -211,16 +216,14 @@ void CheatEngine::Initialize() {
|
||||
.base = GetInteger(page_table.GetHeapRegionStart()),
|
||||
.size = page_table.GetHeapRegionSize(),
|
||||
};
|
||||
|
||||
metadata.address_space_extents = {
|
||||
.base = GetInteger(page_table.GetAddressSpaceStart()),
|
||||
.size = page_table.GetAddressSpaceSize(),
|
||||
};
|
||||
|
||||
metadata.alias_extents = {
|
||||
metadata.aslr_extents = {
|
||||
.base = GetInteger(page_table.GetAliasCodeRegionStart()),
|
||||
.size = page_table.GetAliasCodeRegionSize(),
|
||||
};
|
||||
metadata.alias_extents = {
|
||||
.base = GetInteger(page_table.GetAliasRegionStart()),
|
||||
.size = page_table.GetAliasRegionSize(),
|
||||
};
|
||||
|
||||
is_pending_reload.exchange(true);
|
||||
}
|
||||
|
@ -37,7 +37,7 @@ private:
|
||||
VAddr SanitizeAddress(VAddr address) const;
|
||||
|
||||
const CheatProcessMetadata& metadata;
|
||||
System& system;
|
||||
Core::System& system;
|
||||
};
|
||||
|
||||
// Intermediary class that parses a text file or other disk format for storing cheats into a
|
||||
|
@ -18,7 +18,7 @@ struct CheatProcessMetadata {
|
||||
MemoryRegionExtents main_nso_extents{};
|
||||
MemoryRegionExtents heap_extents{};
|
||||
MemoryRegionExtents alias_extents{};
|
||||
MemoryRegionExtents address_space_extents{};
|
||||
MemoryRegionExtents aslr_extents{};
|
||||
std::array<u8, 0x20> main_nso_build_id{};
|
||||
};
|
||||
|
||||
|
@ -322,8 +322,9 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
|
||||
} break;
|
||||
    case CheatVmOpcodeType::EndConditionalBlock: {
        // 20000000
        // There's actually nothing left to process here!
        opcode.opcode = EndConditionalOpcode{};
        opcode.opcode = EndConditionalOpcode{
            .is_else = ((first_dword >> 24) & 0xf) == 1,
        };
    } break;
    case CheatVmOpcodeType::ControlLoop: {
        // 300R0000 VVVVVVVV
@ -555,6 +556,18 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
            .idx = first_dword & 0xF,
        };
    } break;
    case CheatVmOpcodeType::PauseProcess: {
        /* FF0????? */
        /* FF0 = opcode 0xFF0 */
        /* Pauses the current process. */
        opcode.opcode = PauseProcessOpcode{};
    } break;
    case CheatVmOpcodeType::ResumeProcess: {
        /* FF1????? */
        /* FF1 = opcode 0xFF1 */
        /* Resumes the current process. */
        opcode.opcode = ResumeProcessOpcode{};
    } break;
|
||||
case CheatVmOpcodeType::DebugLog: {
|
||||
// FFFTIX##
|
||||
// FFFTI0Ma aaaaaaaa
|
||||
@ -621,7 +634,7 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
|
||||
return valid;
|
||||
}
|
||||
|
||||
void DmntCheatVm::SkipConditionalBlock() {
void DmntCheatVm::SkipConditionalBlock(bool is_if) {
    if (condition_depth > 0) {
        // We want to continue until we're out of the current block.
        const std::size_t desired_depth = condition_depth - 1;
@ -637,8 +650,12 @@ void DmntCheatVm::SkipConditionalBlock() {
            // We also support nesting of conditional blocks, and Gateway does not.
            if (skip_opcode.begin_conditional_block) {
                condition_depth++;
            } else if (std::holds_alternative<EndConditionalOpcode>(skip_opcode.opcode)) {
                condition_depth--;
            } else if (auto end_cond = std::get_if<EndConditionalOpcode>(&skip_opcode.opcode)) {
                if (!end_cond->is_else) {
                    condition_depth--;
                } else if (is_if && condition_depth - 1 == desired_depth) {
                    break;
                }
            }
        }
    } else {
|
||||
@ -675,6 +692,10 @@ u64 DmntCheatVm::GetCheatProcessAddress(const CheatProcessMetadata& metadata,
|
||||
return metadata.main_nso_extents.base + rel_address;
|
||||
case MemoryAccessType::Heap:
|
||||
return metadata.heap_extents.base + rel_address;
|
||||
case MemoryAccessType::Alias:
|
||||
return metadata.alias_extents.base + rel_address;
|
||||
case MemoryAccessType::Aslr:
|
||||
return metadata.aslr_extents.base + rel_address;
|
||||
}
|
||||
}
|
||||
|
||||
@ -682,7 +703,6 @@ void DmntCheatVm::ResetState() {
|
||||
registers.fill(0);
|
||||
saved_values.fill(0);
|
||||
loop_tops.fill(0);
|
||||
static_registers.fill(0);
|
||||
instruction_ptr = 0;
|
||||
condition_depth = 0;
|
||||
decode_success = true;
|
||||
@ -794,13 +814,18 @@ void DmntCheatVm::Execute(const CheatProcessMetadata& metadata) {
|
||||
}
|
||||
            // Skip conditional block if condition not met.
            if (!cond_met) {
                SkipConditionalBlock();
                SkipConditionalBlock(true);
            }
        } else if (std::holds_alternative<EndConditionalOpcode>(cur_opcode.opcode)) {
            // Decrement the condition depth.
            // We will assume, graciously, that mismatched conditional block ends are a nop.
            if (condition_depth > 0) {
                condition_depth--;
        } else if (auto end_cond = std::get_if<EndConditionalOpcode>(&cur_opcode.opcode)) {
            if (end_cond->is_else) {
                /* Skip to the end of the conditional block. */
                this->SkipConditionalBlock(false);
            } else {
                /* Decrement the condition depth. */
                /* We will assume, graciously, that mismatched conditional block ends are a nop. */
                if (condition_depth > 0) {
                    condition_depth--;
                }
            }
        } else if (auto ctrl_loop = std::get_if<ControlLoopOpcode>(&cur_opcode.opcode)) {
            if (ctrl_loop->start_loop) {
|
||||
@ -908,7 +933,7 @@ void DmntCheatVm::Execute(const CheatProcessMetadata& metadata) {
|
||||
// Check for keypress.
|
||||
if ((begin_keypress_cond->key_mask & kDown) != begin_keypress_cond->key_mask) {
|
||||
// Keys not pressed. Skip conditional block.
|
||||
SkipConditionalBlock();
|
||||
SkipConditionalBlock(true);
|
||||
}
|
||||
} else if (auto perform_math_reg =
|
||||
std::get_if<PerformArithmeticRegisterOpcode>(&cur_opcode.opcode)) {
|
||||
@ -1116,7 +1141,7 @@ void DmntCheatVm::Execute(const CheatProcessMetadata& metadata) {
|
||||
|
||||
// Skip conditional block if condition not met.
|
||||
if (!cond_met) {
|
||||
SkipConditionalBlock();
|
||||
SkipConditionalBlock(true);
|
||||
}
|
||||
} else if (auto save_restore_reg =
|
||||
std::get_if<SaveRestoreRegisterOpcode>(&cur_opcode.opcode)) {
|
||||
@ -1178,6 +1203,10 @@ void DmntCheatVm::Execute(const CheatProcessMetadata& metadata) {
|
||||
// Store a register to a static register.
|
||||
static_registers[rw_static_reg->static_idx] = registers[rw_static_reg->idx];
|
||||
}
|
||||
} else if (std::holds_alternative<PauseProcessOpcode>(cur_opcode.opcode)) {
|
||||
// TODO: Pause cheat process
|
||||
} else if (std::holds_alternative<ResumeProcessOpcode>(cur_opcode.opcode)) {
|
||||
// TODO: Resume cheat process
|
||||
} else if (auto debug_log = std::get_if<DebugLogOpcode>(&cur_opcode.opcode)) {
|
||||
// Read value from memory.
|
||||
u64 log_value = 0;
|
||||
|
@ -42,12 +42,16 @@ enum class CheatVmOpcodeType : u32 {
|
||||
DoubleExtendedWidth = 0xF0,
|
||||
|
||||
// Double-extended width opcodes.
|
||||
    PauseProcess = 0xFF0,
    ResumeProcess = 0xFF1,
    DebugLog = 0xFFF,
};
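
These three ids sit beyond a single nibble, so the decoder has to widen the leading opcode nibble of the first dword before it can dispatch on them. A rough sketch of that widening, assuming the conventional ExtendedWidth marker of 0xC used by the upstream cheat VM (this function is illustrative only, not part of the diff):

#include <cstdint>

// Hypothetical sketch, not part of this diff: widens the leading nibble of the
// first instruction dword until it uniquely names an opcode, which is how ids
// such as 0xFF0 (PauseProcess), 0xFF1 (ResumeProcess) and 0xFFF (DebugLog) arise.
inline std::uint32_t WidenOpcodeId(std::uint32_t first_dword) {
    std::uint32_t opcode = first_dword >> 28;             // top nibble
    if (opcode >= 0xC) {                                   // assumed ExtendedWidth marker
        opcode = (opcode << 4) | ((first_dword >> 24) & 0xF);
    }
    if (opcode >= 0xF0) {                                  // DoubleExtendedWidth, as above
        opcode = (opcode << 4) | ((first_dword >> 20) & 0xF);
    }
    return opcode;
}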
|
||||
|
||||
enum class MemoryAccessType : u32 {
    MainNso = 0,
    Heap = 1,
    Alias = 2,
    Aslr = 3,
};
|
||||
|
||||
enum class ConditionalComparisonType : u32 {
|
||||
@ -131,7 +135,9 @@ struct BeginConditionalOpcode {
|
||||
VmInt value{};
|
||||
};
|
||||
|
||||
struct EndConditionalOpcode {};
|
||||
struct EndConditionalOpcode {
|
||||
bool is_else;
|
||||
};
|
||||
|
||||
struct ControlLoopOpcode {
|
||||
bool start_loop{};
|
||||
@ -222,6 +228,10 @@ struct ReadWriteStaticRegisterOpcode {
|
||||
u32 idx{};
|
||||
};
|
||||
|
||||
struct PauseProcessOpcode {};
|
||||
|
||||
struct ResumeProcessOpcode {};
|
||||
|
||||
struct DebugLogOpcode {
|
||||
u32 bit_width{};
|
||||
u32 log_id{};
|
||||
@ -244,8 +254,8 @@ struct CheatVmOpcode {
|
||||
PerformArithmeticStaticOpcode, BeginKeypressConditionalOpcode,
|
||||
PerformArithmeticRegisterOpcode, StoreRegisterToAddressOpcode,
|
||||
BeginRegisterConditionalOpcode, SaveRestoreRegisterOpcode,
|
||||
SaveRestoreRegisterMaskOpcode, ReadWriteStaticRegisterOpcode, DebugLogOpcode,
|
||||
UnrecognizedInstruction>
|
||||
SaveRestoreRegisterMaskOpcode, ReadWriteStaticRegisterOpcode, PauseProcessOpcode,
|
||||
ResumeProcessOpcode, DebugLogOpcode, UnrecognizedInstruction>
|
||||
opcode{};
|
||||
};
|
||||
|
||||
@ -296,7 +306,7 @@ private:
|
||||
std::array<std::size_t, NumRegisters> loop_tops{};
|
||||
|
||||
bool DecodeNextOpcode(CheatVmOpcode& out);
|
||||
void SkipConditionalBlock();
|
||||
void SkipConditionalBlock(bool is_if);
|
||||
void ResetState();
|
||||
|
||||
// For implementing the DebugLog opcode.
|
||||
|
@ -401,6 +401,14 @@ void Config::ReadNetworkValues() {
|
||||
EndGroup();
|
||||
}
|
||||
|
||||
void Config::ReadLibraryAppletValues() {
|
||||
BeginGroup(Settings::TranslateCategory(Settings::Category::LibraryApplet));
|
||||
|
||||
ReadCategory(Settings::Category::LibraryApplet);
|
||||
|
||||
EndGroup();
|
||||
}
|
||||
|
||||
void Config::ReadValues() {
|
||||
if (global) {
|
||||
ReadDataStorageValues();
|
||||
@ -410,6 +418,7 @@ void Config::ReadValues() {
|
||||
ReadServiceValues();
|
||||
ReadWebServiceValues();
|
||||
ReadMiscellaneousValues();
|
||||
ReadLibraryAppletValues();
|
||||
}
|
||||
ReadControlValues();
|
||||
ReadCoreValues();
|
||||
@ -511,6 +520,7 @@ void Config::SaveValues() {
|
||||
SaveNetworkValues();
|
||||
SaveWebServiceValues();
|
||||
SaveMiscellaneousValues();
|
||||
SaveLibraryAppletValues();
|
||||
} else {
|
||||
LOG_DEBUG(Config, "Saving only generic configuration values");
|
||||
}
|
||||
@ -691,6 +701,14 @@ void Config::SaveWebServiceValues() {
|
||||
EndGroup();
|
||||
}
|
||||
|
||||
void Config::SaveLibraryAppletValues() {
|
||||
BeginGroup(Settings::TranslateCategory(Settings::Category::LibraryApplet));
|
||||
|
||||
WriteCategory(Settings::Category::LibraryApplet);
|
||||
|
||||
EndGroup();
|
||||
}
|
||||
|
||||
bool Config::ReadBooleanSetting(const std::string& key, const std::optional<bool> default_value) {
|
||||
std::string full_key = GetFullKey(key, false);
|
||||
if (!default_value.has_value()) {
|
||||
|
@ -88,6 +88,7 @@ protected:
|
||||
void ReadSystemValues();
|
||||
void ReadWebServiceValues();
|
||||
void ReadNetworkValues();
|
||||
void ReadLibraryAppletValues();
|
||||
|
||||
// Read platform specific sections
|
||||
virtual void ReadHidbusValues() = 0;
|
||||
@ -121,6 +122,7 @@ protected:
|
||||
void SaveScreenshotValues();
|
||||
void SaveSystemValues();
|
||||
void SaveWebServiceValues();
|
||||
void SaveLibraryAppletValues();
|
||||
|
||||
// Save platform specific sections
|
||||
virtual void SaveHidbusValues() = 0;
|
||||
|
@ -60,10 +60,11 @@ public:
|
||||
Add(spv::ImageOperandsMask::ConstOffsets, offsets);
|
||||
}
|
||||
|
||||
explicit ImageOperands(Id lod, Id ms) {
|
||||
explicit ImageOperands(EmitContext& ctx, const IR::Value& offset, Id lod, Id ms) {
|
||||
if (Sirit::ValidId(lod)) {
|
||||
Add(spv::ImageOperandsMask::Lod, lod);
|
||||
}
|
||||
AddOffset(ctx, offset, ImageFetchOffsetAllowed);
|
||||
if (Sirit::ValidId(ms)) {
|
||||
Add(spv::ImageOperandsMask::Sample, ms);
|
||||
}
|
||||
@ -311,37 +312,6 @@ Id ImageGatherSubpixelOffset(EmitContext& ctx, const IR::TextureInstInfo& info,
|
||||
return coords;
|
||||
}
|
||||
}
|
||||
|
||||
void AddOffsetToCoordinates(EmitContext& ctx, const IR::TextureInstInfo& info, Id& coords,
|
||||
Id offset) {
|
||||
if (!Sirit::ValidId(offset)) {
|
||||
return;
|
||||
}
|
||||
|
||||
Id result_type{};
|
||||
switch (info.type) {
|
||||
case TextureType::Buffer:
|
||||
case TextureType::Color1D:
|
||||
case TextureType::ColorArray1D: {
|
||||
result_type = ctx.U32[1];
|
||||
break;
|
||||
}
|
||||
case TextureType::Color2D:
|
||||
case TextureType::Color2DRect:
|
||||
case TextureType::ColorArray2D: {
|
||||
result_type = ctx.U32[2];
|
||||
break;
|
||||
}
|
||||
case TextureType::Color3D: {
|
||||
result_type = ctx.U32[3];
|
||||
break;
|
||||
}
|
||||
case TextureType::ColorCube:
|
||||
case TextureType::ColorArrayCube:
|
||||
return;
|
||||
}
|
||||
coords = ctx.OpIAdd(result_type, coords, offset);
|
||||
}
|
||||
} // Anonymous namespace
|
||||
|
||||
Id EmitBindlessImageSampleImplicitLod(EmitContext&) {
|
||||
@ -524,10 +494,9 @@ Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
|
||||
operands.Span());
|
||||
}
|
||||
|
||||
Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id offset,
|
||||
Id lod, Id ms) {
|
||||
Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
|
||||
const IR::Value& offset, Id lod, Id ms) {
|
||||
const auto info{inst->Flags<IR::TextureInstInfo>()};
|
||||
AddOffsetToCoordinates(ctx, info, coords, offset);
|
||||
if (info.type == TextureType::Buffer) {
|
||||
lod = Id{};
|
||||
}
|
||||
@ -535,7 +504,7 @@ Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id c
|
||||
// This image is multisampled, lod must be implicit
|
||||
lod = Id{};
|
||||
}
|
||||
const ImageOperands operands(lod, ms);
|
||||
const ImageOperands operands(ctx, offset, lod, ms);
|
||||
return Emit(&EmitContext::OpImageSparseFetch, &EmitContext::OpImageFetch, ctx, inst, ctx.F32[4],
|
||||
TextureImage(ctx, info, index), coords, operands.MaskOptional(), operands.Span());
|
||||
}
|
||||
|
@ -537,8 +537,8 @@ Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id
|
||||
const IR::Value& offset, const IR::Value& offset2);
|
||||
Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
|
||||
const IR::Value& offset, const IR::Value& offset2, Id dref);
|
||||
Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id offset,
|
||||
Id lod, Id ms);
|
||||
Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
|
||||
const IR::Value& offset, Id lod, Id ms);
|
||||
Id EmitImageQueryDimensions(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id lod,
|
||||
const IR::Value& skip_mips);
|
||||
Id EmitImageQueryLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords);
|
||||
|
@ -18,6 +18,7 @@ add_library(video_core STATIC
|
||||
buffer_cache/usage_tracker.h
|
||||
buffer_cache/word_manager.h
|
||||
cache_types.h
|
||||
capture.h
|
||||
cdma_pusher.cpp
|
||||
cdma_pusher.h
|
||||
compatible_formats.cpp
|
||||
@ -59,8 +60,8 @@ add_library(video_core STATIC
|
||||
framebuffer_config.h
|
||||
fsr.cpp
|
||||
fsr.h
|
||||
host1x/codecs/codec.cpp
|
||||
host1x/codecs/codec.h
|
||||
host1x/codecs/decoder.cpp
|
||||
host1x/codecs/decoder.h
|
||||
host1x/codecs/h264.cpp
|
||||
host1x/codecs/h264.h
|
||||
host1x/codecs/vp8.cpp
|
||||
@ -79,8 +80,6 @@ add_library(video_core STATIC
|
||||
host1x/nvdec.cpp
|
||||
host1x/nvdec.h
|
||||
host1x/nvdec_common.h
|
||||
host1x/sync_manager.cpp
|
||||
host1x/sync_manager.h
|
||||
host1x/syncpoint_manager.cpp
|
||||
host1x/syncpoint_manager.h
|
||||
host1x/vic.cpp
|
||||
@ -101,6 +100,7 @@ add_library(video_core STATIC
|
||||
memory_manager.cpp
|
||||
memory_manager.h
|
||||
precompiled_headers.h
|
||||
present.h
|
||||
pte_kind.h
|
||||
query_cache/bank_base.h
|
||||
query_cache/query_base.h
|
||||
@ -274,7 +274,6 @@ add_library(video_core STATIC
|
||||
texture_cache/image_view_info.h
|
||||
texture_cache/render_targets.h
|
||||
texture_cache/samples_helper.h
|
||||
texture_cache/slot_vector.h
|
||||
texture_cache/texture_cache.cpp
|
||||
texture_cache/texture_cache.h
|
||||
texture_cache/texture_cache_base.h
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include <memory>
|
||||
#include <numeric>
|
||||
|
||||
#include "common/range_sets.inc"
|
||||
#include "video_core/buffer_cache/buffer_cache_base.h"
|
||||
#include "video_core/guest_memory.h"
|
||||
#include "video_core/host1x/gpu_device_memory_manager.h"
|
||||
@ -20,7 +21,7 @@ BufferCache<P>::BufferCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, R
|
||||
: runtime{runtime_}, device_memory{device_memory_}, memory_tracker{device_memory} {
|
||||
// Ensure the first slot is used for the null buffer
|
||||
void(slot_buffers.insert(runtime, NullBufferParams{}));
|
||||
common_ranges.clear();
|
||||
gpu_modified_ranges.Clear();
|
||||
inline_buffer_id = NULL_BUFFER_ID;
|
||||
|
||||
if (!runtime.CanReportMemoryUsage()) {
|
||||
@ -43,6 +44,9 @@ BufferCache<P>::BufferCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, R
|
||||
DEFAULT_CRITICAL_MEMORY));
|
||||
}
|
||||
|
||||
template <class P>
|
||||
BufferCache<P>::~BufferCache() = default;
|
||||
|
||||
template <class P>
|
||||
void BufferCache<P>::RunGarbageCollector() {
|
||||
const bool aggressive_gc = total_used_memory >= critical_memory;
|
||||
@ -96,20 +100,17 @@ void BufferCache<P>::TickFrame() {
|
||||
++frame_tick;
|
||||
delayed_destruction_ring.Tick();
|
||||
|
||||
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
|
||||
for (auto& buffer : async_buffers_death_ring) {
|
||||
runtime.FreeDeferredStagingBuffer(buffer);
|
||||
}
|
||||
async_buffers_death_ring.clear();
|
||||
for (auto& buffer : async_buffers_death_ring) {
|
||||
runtime.FreeDeferredStagingBuffer(buffer);
|
||||
}
|
||||
async_buffers_death_ring.clear();
|
||||
}
|
||||
|
||||
template <class P>
|
||||
void BufferCache<P>::WriteMemory(DAddr device_addr, u64 size) {
|
||||
if (memory_tracker.IsRegionGpuModified(device_addr, size)) {
|
||||
const IntervalType subtract_interval{device_addr, device_addr + size};
|
||||
ClearDownload(subtract_interval);
|
||||
common_ranges.subtract(subtract_interval);
|
||||
ClearDownload(device_addr, size);
|
||||
gpu_modified_ranges.Subtract(device_addr, size);
|
||||
}
|
||||
memory_tracker.MarkRegionAsCpuModified(device_addr, size);
|
||||
}
|
||||
@ -174,11 +175,11 @@ void BufferCache<P>::DownloadMemory(DAddr device_addr, u64 size) {
|
||||
}
|
||||
|
||||
template <class P>
|
||||
void BufferCache<P>::ClearDownload(IntervalType subtract_interval) {
|
||||
RemoveEachInOverlapCounter(async_downloads, subtract_interval, -1024);
|
||||
uncommitted_ranges.subtract(subtract_interval);
|
||||
for (auto& interval_set : committed_ranges) {
|
||||
interval_set.subtract(subtract_interval);
|
||||
void BufferCache<P>::ClearDownload(DAddr device_addr, u64 size) {
|
||||
async_downloads.DeleteAll(device_addr, size);
|
||||
uncommitted_gpu_modified_ranges.Subtract(device_addr, size);
|
||||
for (auto& interval_set : committed_gpu_modified_ranges) {
|
||||
interval_set.Subtract(device_addr, size);
|
||||
}
|
||||
}
|
||||
|
||||
@ -195,8 +196,7 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
|
||||
return false;
|
||||
}
|
||||
|
||||
const IntervalType subtract_interval{*cpu_dest_address, *cpu_dest_address + amount};
|
||||
ClearDownload(subtract_interval);
|
||||
ClearDownload(*cpu_dest_address, amount);
|
||||
|
||||
BufferId buffer_a;
|
||||
BufferId buffer_b;
|
||||
@ -215,21 +215,20 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
|
||||
.size = amount,
|
||||
}};
|
||||
|
||||
boost::container::small_vector<IntervalType, 4> tmp_intervals;
|
||||
boost::container::small_vector<std::pair<DAddr, size_t>, 4> tmp_intervals;
|
||||
auto mirror = [&](DAddr base_address, DAddr base_address_end) {
|
||||
const u64 size = base_address_end - base_address;
|
||||
const DAddr diff = base_address - *cpu_src_address;
|
||||
const DAddr new_base_address = *cpu_dest_address + diff;
|
||||
const IntervalType add_interval{new_base_address, new_base_address + size};
|
||||
tmp_intervals.push_back(add_interval);
|
||||
uncommitted_ranges.add(add_interval);
|
||||
tmp_intervals.push_back({new_base_address, size});
|
||||
uncommitted_gpu_modified_ranges.Add(new_base_address, size);
|
||||
};
|
||||
ForEachInRangeSet(common_ranges, *cpu_src_address, amount, mirror);
|
||||
gpu_modified_ranges.ForEachInRange(*cpu_src_address, amount, mirror);
|
||||
// This subtraction in this order is important for overlapping copies.
|
||||
common_ranges.subtract(subtract_interval);
|
||||
gpu_modified_ranges.Subtract(*cpu_dest_address, amount);
|
||||
const bool has_new_downloads = tmp_intervals.size() != 0;
|
||||
for (const IntervalType& add_interval : tmp_intervals) {
|
||||
common_ranges.add(add_interval);
|
||||
for (const auto& pair : tmp_intervals) {
|
||||
gpu_modified_ranges.Add(pair.first, pair.second);
|
||||
}
|
||||
const auto& copy = copies[0];
|
||||
src_buffer.MarkUsage(copy.src_offset, copy.size);
|
||||
@ -257,9 +256,8 @@ bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
|
||||
}
|
||||
|
||||
const size_t size = amount * sizeof(u32);
|
||||
const IntervalType subtract_interval{*cpu_dst_address, *cpu_dst_address + size};
|
||||
ClearDownload(subtract_interval);
|
||||
common_ranges.subtract(subtract_interval);
|
||||
ClearDownload(*cpu_dst_address, size);
|
||||
gpu_modified_ranges.Subtract(*cpu_dst_address, size);
|
||||
|
||||
const BufferId buffer = FindBuffer(*cpu_dst_address, static_cast<u32>(size));
|
||||
Buffer& dest_buffer = slot_buffers[buffer];
|
||||
@ -300,11 +298,11 @@ std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainCPUBuffer(
|
||||
MarkWrittenBuffer(buffer_id, device_addr, size);
|
||||
break;
|
||||
case ObtainBufferOperation::DiscardWrite: {
|
||||
DAddr device_addr_start = Common::AlignDown(device_addr, 64);
|
||||
DAddr device_addr_end = Common::AlignUp(device_addr + size, 64);
|
||||
IntervalType interval{device_addr_start, device_addr_end};
|
||||
ClearDownload(interval);
|
||||
common_ranges.subtract(interval);
|
||||
const DAddr device_addr_start = Common::AlignDown(device_addr, 64);
|
||||
const DAddr device_addr_end = Common::AlignUp(device_addr + size, 64);
|
||||
const size_t new_size = device_addr_end - device_addr_start;
|
||||
ClearDownload(device_addr_start, new_size);
|
||||
gpu_modified_ranges.Subtract(device_addr_start, new_size);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
@ -504,46 +502,40 @@ void BufferCache<P>::FlushCachedWrites() {
|
||||
|
||||
template <class P>
|
||||
bool BufferCache<P>::HasUncommittedFlushes() const noexcept {
|
||||
return !uncommitted_ranges.empty() || !committed_ranges.empty();
|
||||
return !uncommitted_gpu_modified_ranges.Empty() || !committed_gpu_modified_ranges.empty();
|
||||
}
|
||||
|
||||
template <class P>
|
||||
void BufferCache<P>::AccumulateFlushes() {
|
||||
if (uncommitted_ranges.empty()) {
|
||||
if (uncommitted_gpu_modified_ranges.Empty()) {
|
||||
return;
|
||||
}
|
||||
committed_ranges.emplace_back(std::move(uncommitted_ranges));
|
||||
committed_gpu_modified_ranges.emplace_back(std::move(uncommitted_gpu_modified_ranges));
|
||||
}
|
||||
|
||||
template <class P>
|
||||
bool BufferCache<P>::ShouldWaitAsyncFlushes() const noexcept {
|
||||
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
|
||||
return (!async_buffers.empty() && async_buffers.front().has_value());
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
return (!async_buffers.empty() && async_buffers.front().has_value());
|
||||
}
|
||||
|
||||
template <class P>
|
||||
void BufferCache<P>::CommitAsyncFlushesHigh() {
|
||||
AccumulateFlushes();
|
||||
|
||||
if (committed_ranges.empty()) {
|
||||
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
|
||||
async_buffers.emplace_back(std::optional<Async_Buffer>{});
|
||||
}
|
||||
if (committed_gpu_modified_ranges.empty()) {
|
||||
async_buffers.emplace_back(std::optional<Async_Buffer>{});
|
||||
return;
|
||||
}
|
||||
MICROPROFILE_SCOPE(GPU_DownloadMemory);
|
||||
|
||||
auto it = committed_ranges.begin();
|
||||
while (it != committed_ranges.end()) {
|
||||
auto it = committed_gpu_modified_ranges.begin();
|
||||
while (it != committed_gpu_modified_ranges.end()) {
|
||||
auto& current_intervals = *it;
|
||||
auto next_it = std::next(it);
|
||||
while (next_it != committed_ranges.end()) {
|
||||
for (auto& interval : *next_it) {
|
||||
current_intervals.subtract(interval);
|
||||
}
|
||||
while (next_it != committed_gpu_modified_ranges.end()) {
|
||||
next_it->ForEach([¤t_intervals](DAddr start, DAddr end) {
|
||||
current_intervals.Subtract(start, end - start);
|
||||
});
|
||||
next_it++;
|
||||
}
|
||||
it++;
|
||||
@ -552,10 +544,10 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
|
||||
boost::container::small_vector<std::pair<BufferCopy, BufferId>, 16> downloads;
|
||||
u64 total_size_bytes = 0;
|
||||
u64 largest_copy = 0;
|
||||
for (const IntervalSet& intervals : committed_ranges) {
|
||||
for (auto& interval : intervals) {
|
||||
const std::size_t size = interval.upper() - interval.lower();
|
||||
const DAddr device_addr = interval.lower();
|
||||
for (const Common::RangeSet<DAddr>& range_set : committed_gpu_modified_ranges) {
|
||||
range_set.ForEach([&](DAddr interval_lower, DAddr interval_upper) {
|
||||
const std::size_t size = interval_upper - interval_lower;
|
||||
const DAddr device_addr = interval_lower;
|
||||
ForEachBufferInRange(device_addr, size, [&](BufferId buffer_id, Buffer& buffer) {
|
||||
const DAddr buffer_start = buffer.CpuAddr();
|
||||
const DAddr buffer_end = buffer_start + buffer.SizeBytes();
|
||||
@ -583,77 +575,35 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
|
||||
largest_copy = std::max(largest_copy, new_size);
|
||||
};
|
||||
|
||||
ForEachInRangeSet(common_ranges, device_addr_out, range_size, add_download);
|
||||
gpu_modified_ranges.ForEachInRange(device_addr_out, range_size,
|
||||
add_download);
|
||||
});
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
committed_ranges.clear();
|
||||
committed_gpu_modified_ranges.clear();
|
||||
if (downloads.empty()) {
|
||||
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
|
||||
async_buffers.emplace_back(std::optional<Async_Buffer>{});
|
||||
}
|
||||
async_buffers.emplace_back(std::optional<Async_Buffer>{});
|
||||
return;
|
||||
}
|
||||
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
|
||||
auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes, true);
|
||||
boost::container::small_vector<BufferCopy, 4> normalized_copies;
|
||||
IntervalSet new_async_range{};
|
||||
runtime.PreCopyBarrier();
|
||||
for (auto& [copy, buffer_id] : downloads) {
|
||||
copy.dst_offset += download_staging.offset;
|
||||
const std::array copies{copy};
|
||||
BufferCopy second_copy{copy};
|
||||
Buffer& buffer = slot_buffers[buffer_id];
|
||||
second_copy.src_offset = static_cast<size_t>(buffer.CpuAddr()) + copy.src_offset;
|
||||
DAddr orig_device_addr = static_cast<DAddr>(second_copy.src_offset);
|
||||
const IntervalType base_interval{orig_device_addr, orig_device_addr + copy.size};
|
||||
async_downloads += std::make_pair(base_interval, 1);
|
||||
buffer.MarkUsage(copy.src_offset, copy.size);
|
||||
runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
|
||||
normalized_copies.push_back(second_copy);
|
||||
}
|
||||
runtime.PostCopyBarrier();
|
||||
pending_downloads.emplace_back(std::move(normalized_copies));
|
||||
async_buffers.emplace_back(download_staging);
|
||||
} else {
|
||||
if (!Settings::IsGPULevelHigh()) {
|
||||
committed_ranges.clear();
|
||||
uncommitted_ranges.clear();
|
||||
} else {
|
||||
if constexpr (USE_MEMORY_MAPS) {
|
||||
auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
|
||||
runtime.PreCopyBarrier();
|
||||
for (auto& [copy, buffer_id] : downloads) {
|
||||
// Have in mind the staging buffer offset for the copy
|
||||
copy.dst_offset += download_staging.offset;
|
||||
const std::array copies{copy};
|
||||
Buffer& buffer = slot_buffers[buffer_id];
|
||||
buffer.MarkUsage(copy.src_offset, copy.size);
|
||||
runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
|
||||
}
|
||||
runtime.PostCopyBarrier();
|
||||
runtime.Finish();
|
||||
for (const auto& [copy, buffer_id] : downloads) {
|
||||
const Buffer& buffer = slot_buffers[buffer_id];
|
||||
const DAddr device_addr = buffer.CpuAddr() + copy.src_offset;
|
||||
// Undo the modified offset
|
||||
const u64 dst_offset = copy.dst_offset - download_staging.offset;
|
||||
const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
|
||||
device_memory.WriteBlockUnsafe(device_addr, read_mapped_memory, copy.size);
|
||||
}
|
||||
} else {
|
||||
const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
|
||||
for (const auto& [copy, buffer_id] : downloads) {
|
||||
Buffer& buffer = slot_buffers[buffer_id];
|
||||
buffer.ImmediateDownload(copy.src_offset,
|
||||
immediate_buffer.subspan(0, copy.size));
|
||||
const DAddr device_addr = buffer.CpuAddr() + copy.src_offset;
|
||||
device_memory.WriteBlockUnsafe(device_addr, immediate_buffer.data(), copy.size);
|
||||
}
|
||||
}
|
||||
}
|
||||
auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes, true);
|
||||
boost::container::small_vector<BufferCopy, 4> normalized_copies;
|
||||
runtime.PreCopyBarrier();
|
||||
for (auto& [copy, buffer_id] : downloads) {
|
||||
copy.dst_offset += download_staging.offset;
|
||||
const std::array copies{copy};
|
||||
BufferCopy second_copy{copy};
|
||||
Buffer& buffer = slot_buffers[buffer_id];
|
||||
second_copy.src_offset = static_cast<size_t>(buffer.CpuAddr()) + copy.src_offset;
|
||||
const DAddr orig_device_addr = static_cast<DAddr>(second_copy.src_offset);
|
||||
async_downloads.Add(orig_device_addr, copy.size);
|
||||
buffer.MarkUsage(copy.src_offset, copy.size);
|
||||
runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
|
||||
normalized_copies.push_back(second_copy);
|
||||
}
|
||||
runtime.PostCopyBarrier();
|
||||
pending_downloads.emplace_back(std::move(normalized_copies));
|
||||
async_buffers.emplace_back(download_staging);
|
||||
}
|
||||
|
||||
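In the async branch above each queued copy is recorded twice: once as the GPU-side staging copy and once as a normalized copy whose src_offset is rewritten to the original device address, which is what PopAsyncBuffers later uses to write the data back. A compact sketch of that bookkeeping, reusing the BufferCopy fields shown in the hunk:

// Hypothetical record equivalent to what a normalized copy carries for write-back.
struct PendingDownload {
    DAddr device_addr;  // where the data is written back on the CPU side
    u64 staging_offset; // where the GPU placed it inside the staging buffer
    u64 size;
};

PendingDownload MakePending(const BufferCopy& copy, DAddr buffer_cpu_addr, u64 staging_base) {
    return {
        .device_addr = buffer_cpu_addr + copy.src_offset,
        .staging_offset = copy.dst_offset - staging_base,
        .size = copy.size,
    };
}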
template <class P>
|
||||
@ -676,37 +626,31 @@ void BufferCache<P>::PopAsyncBuffers() {
|
||||
async_buffers.pop_front();
|
||||
return;
|
||||
}
|
||||
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
|
||||
auto& downloads = pending_downloads.front();
|
||||
auto& async_buffer = async_buffers.front();
|
||||
u8* base = async_buffer->mapped_span.data();
|
||||
const size_t base_offset = async_buffer->offset;
|
||||
for (const auto& copy : downloads) {
|
||||
const DAddr device_addr = static_cast<DAddr>(copy.src_offset);
|
||||
const u64 dst_offset = copy.dst_offset - base_offset;
|
||||
const u8* read_mapped_memory = base + dst_offset;
|
||||
ForEachInOverlapCounter(
|
||||
async_downloads, device_addr, copy.size, [&](DAddr start, DAddr end, int count) {
|
||||
device_memory.WriteBlockUnsafe(start, &read_mapped_memory[start - device_addr],
|
||||
end - start);
|
||||
if (count == 1) {
|
||||
const IntervalType base_interval{start, end};
|
||||
common_ranges.subtract(base_interval);
|
||||
}
|
||||
});
|
||||
const IntervalType subtract_interval{device_addr, device_addr + copy.size};
|
||||
RemoveEachInOverlapCounter(async_downloads, subtract_interval, -1);
|
||||
}
|
||||
async_buffers_death_ring.emplace_back(*async_buffer);
|
||||
async_buffers.pop_front();
|
||||
pending_downloads.pop_front();
|
||||
auto& downloads = pending_downloads.front();
|
||||
auto& async_buffer = async_buffers.front();
|
||||
u8* base = async_buffer->mapped_span.data();
|
||||
const size_t base_offset = async_buffer->offset;
|
||||
for (const auto& copy : downloads) {
|
||||
const DAddr device_addr = static_cast<DAddr>(copy.src_offset);
|
||||
const u64 dst_offset = copy.dst_offset - base_offset;
|
||||
const u8* read_mapped_memory = base + dst_offset;
|
||||
async_downloads.ForEachInRange(device_addr, copy.size, [&](DAddr start, DAddr end, s32) {
|
||||
device_memory.WriteBlockUnsafe(start, &read_mapped_memory[start - device_addr],
|
||||
end - start);
|
||||
});
|
||||
async_downloads.Subtract(device_addr, copy.size, [&](DAddr start, DAddr end) {
|
||||
gpu_modified_ranges.Subtract(start, end - start);
|
||||
});
|
||||
}
|
||||
async_buffers_death_ring.emplace_back(*async_buffer);
|
||||
async_buffers.pop_front();
|
||||
pending_downloads.pop_front();
|
||||
}
|
||||
|
||||
template <class P>
|
||||
bool BufferCache<P>::IsRegionGpuModified(DAddr addr, size_t size) {
|
||||
bool is_dirty = false;
|
||||
ForEachInRangeSet(common_ranges, addr, size, [&](DAddr, DAddr) { is_dirty = true; });
|
||||
gpu_modified_ranges.ForEachInRange(addr, size, [&](DAddr, DAddr) { is_dirty = true; });
|
||||
return is_dirty;
|
||||
}
|
||||
|
||||
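PopAsyncBuffers treats Common::OverlapRangeSet as a reference-counted interval map: Add raises a counter over a range, and Subtract reports through its callback only the sub-ranges whose counter has dropped to zero. A usage sketch under that assumption (the template's exact interface is not shown in this diff):

Common::OverlapRangeSet<DAddr> in_flight;
in_flight.Add(0x1000, 0x200); // first download covers [0x1000, 0x1200)
in_flight.Add(0x1100, 0x200); // second download overlaps the tail

// Completing the first download should only release [0x1000, 0x1100); the overlap
// is still referenced by the second download, so it is not reported yet.
in_flight.Subtract(0x1000, 0x200, [](DAddr start, DAddr end) {
    // expected here: start == 0x1000, end == 0x1100
});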
@ -1320,10 +1264,8 @@ void BufferCache<P>::UpdateComputeTextureBuffers() {
|
||||
template <class P>
|
||||
void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, DAddr device_addr, u32 size) {
|
||||
memory_tracker.MarkRegionAsGpuModified(device_addr, size);
|
||||
|
||||
const IntervalType base_interval{device_addr, device_addr + size};
|
||||
common_ranges.add(base_interval);
|
||||
uncommitted_ranges.add(base_interval);
|
||||
gpu_modified_ranges.Add(device_addr, size);
|
||||
uncommitted_gpu_modified_ranges.Add(device_addr, size);
|
||||
}
|
||||
|
||||
template <class P>
|
||||
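MarkWrittenBuffer now records a GPU write in three places: the page-granular memory tracker plus the global and not-yet-committed range sets. The same bookkeeping as a standalone sketch, templated on the tracker type since MemoryTracker comes from the cache's policy parameter:

// Hypothetical standalone version of the bookkeeping MarkWrittenBuffer performs.
template <typename MemoryTracker>
void RecordGpuWrite(MemoryTracker& tracker, Common::RangeSet<DAddr>& all_writes,
                    Common::RangeSet<DAddr>& pending_writes, DAddr addr, u32 size) {
    tracker.MarkRegionAsGpuModified(addr, size); // page-granular dirty tracking
    all_writes.Add(addr, size);                  // queried by IsRegionGpuModified
    pending_writes.Add(addr, size);              // drained on the next commit
}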
@ -1546,7 +1488,10 @@ void BufferCache<P>::ImmediateUploadMemory([[maybe_unused]] Buffer& buffer,
|
||||
std::span<const u8> upload_span;
|
||||
const DAddr device_addr = buffer.CpuAddr() + copy.dst_offset;
|
||||
if (IsRangeGranular(device_addr, copy.size)) {
|
||||
upload_span = std::span(device_memory.GetPointer<u8>(device_addr), copy.size);
|
||||
auto* const ptr = device_memory.GetPointer<u8>(device_addr);
|
||||
if (ptr != nullptr) {
|
||||
upload_span = std::span(ptr, copy.size);
|
||||
}
|
||||
} else {
|
||||
if (immediate_buffer.empty()) {
|
||||
immediate_buffer = ImmediateBuffer(largest_copy);
|
||||
@ -1600,9 +1545,8 @@ bool BufferCache<P>::InlineMemory(DAddr dest_address, size_t copy_size,
|
||||
template <class P>
|
||||
void BufferCache<P>::InlineMemoryImplementation(DAddr dest_address, size_t copy_size,
|
||||
std::span<const u8> inlined_buffer) {
|
||||
const IntervalType subtract_interval{dest_address, dest_address + copy_size};
|
||||
ClearDownload(subtract_interval);
|
||||
common_ranges.subtract(subtract_interval);
|
||||
ClearDownload(dest_address, copy_size);
|
||||
gpu_modified_ranges.Subtract(dest_address, copy_size);
|
||||
|
||||
BufferId buffer_id = FindBuffer(dest_address, static_cast<u32>(copy_size));
|
||||
auto& buffer = slot_buffers[buffer_id];
|
||||
@ -1652,12 +1596,9 @@ void BufferCache<P>::DownloadBufferMemory(Buffer& buffer, DAddr device_addr, u64
|
||||
largest_copy = std::max(largest_copy, new_size);
|
||||
};
|
||||
|
||||
const DAddr start_address = device_addr_out;
|
||||
const DAddr end_address = start_address + range_size;
|
||||
ForEachInRangeSet(common_ranges, start_address, range_size, add_download);
|
||||
const IntervalType subtract_interval{start_address, end_address};
|
||||
ClearDownload(subtract_interval);
|
||||
common_ranges.subtract(subtract_interval);
|
||||
gpu_modified_ranges.ForEachInRange(device_addr_out, range_size, add_download);
|
||||
ClearDownload(device_addr_out, range_size);
|
||||
gpu_modified_ranges.Subtract(device_addr_out, range_size);
|
||||
});
|
||||
if (total_size_bytes == 0) {
|
||||
return;
|
||||
|
@ -13,25 +13,15 @@
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
#include <boost/container/small_vector.hpp>
|
||||
#define BOOST_NO_MT
|
||||
#include <boost/pool/detail/mutex.hpp>
|
||||
#undef BOOST_NO_MT
|
||||
#include <boost/icl/interval.hpp>
|
||||
#include <boost/icl/interval_base_set.hpp>
|
||||
#include <boost/icl/interval_set.hpp>
|
||||
#include <boost/icl/split_interval_map.hpp>
|
||||
#include <boost/pool/pool.hpp>
|
||||
#include <boost/pool/pool_alloc.hpp>
|
||||
#include <boost/pool/poolfwd.hpp>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "common/div_ceil.h"
|
||||
#include "common/literals.h"
|
||||
#include "common/lru_cache.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/range_sets.h"
|
||||
#include "common/scope_exit.h"
|
||||
#include "common/settings.h"
|
||||
#include "common/slot_vector.h"
|
||||
#include "video_core/buffer_cache/buffer_base.h"
|
||||
#include "video_core/control/channel_state_cache.h"
|
||||
#include "video_core/delayed_destruction_ring.h"
|
||||
@ -41,21 +31,15 @@
|
||||
#include "video_core/engines/maxwell_3d.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
#include "video_core/surface.h"
|
||||
#include "video_core/texture_cache/slot_vector.h"
|
||||
#include "video_core/texture_cache/types.h"
|
||||
|
||||
namespace boost {
|
||||
template <typename T>
|
||||
class fast_pool_allocator<T, default_user_allocator_new_delete, details::pool::null_mutex, 4096, 0>;
|
||||
}
|
||||
|
||||
namespace VideoCommon {
|
||||
|
||||
MICROPROFILE_DECLARE(GPU_PrepareBuffers);
|
||||
MICROPROFILE_DECLARE(GPU_BindUploadBuffers);
|
||||
MICROPROFILE_DECLARE(GPU_DownloadMemory);
|
||||
|
||||
using BufferId = SlotId;
|
||||
using BufferId = Common::SlotId;
|
||||
|
||||
using VideoCore::Surface::PixelFormat;
|
||||
using namespace Common::Literals;
|
||||
@ -184,7 +168,6 @@ class BufferCache : public VideoCommon::ChannelSetupCaches<BufferCacheChannelInf
|
||||
static constexpr bool NEEDS_BIND_STORAGE_INDEX = P::NEEDS_BIND_STORAGE_INDEX;
|
||||
static constexpr bool USE_MEMORY_MAPS = P::USE_MEMORY_MAPS;
|
||||
static constexpr bool SEPARATE_IMAGE_BUFFERS_BINDINGS = P::SEPARATE_IMAGE_BUFFER_BINDINGS;
|
||||
static constexpr bool IMPLEMENTS_ASYNC_DOWNLOADS = P::IMPLEMENTS_ASYNC_DOWNLOADS;
|
||||
static constexpr bool USE_MEMORY_MAPS_FOR_UPLOADS = P::USE_MEMORY_MAPS_FOR_UPLOADS;
|
||||
|
||||
static constexpr s64 DEFAULT_EXPECTED_MEMORY = 512_MiB;
|
||||
@ -202,34 +185,6 @@ class BufferCache : public VideoCommon::ChannelSetupCaches<BufferCacheChannelInf
|
||||
using Async_Buffer = typename P::Async_Buffer;
|
||||
using MemoryTracker = typename P::MemoryTracker;
|
||||
|
||||
using IntervalCompare = std::less<DAddr>;
|
||||
using IntervalInstance = boost::icl::interval_type_default<DAddr, std::less>;
|
||||
using IntervalAllocator = boost::fast_pool_allocator<DAddr>;
|
||||
using IntervalSet = boost::icl::interval_set<DAddr>;
|
||||
using IntervalType = typename IntervalSet::interval_type;
|
||||
|
||||
template <typename Type>
|
||||
struct counter_add_functor : public boost::icl::identity_based_inplace_combine<Type> {
|
||||
// types
|
||||
typedef counter_add_functor<Type> type;
|
||||
typedef boost::icl::identity_based_inplace_combine<Type> base_type;
|
||||
|
||||
// public member functions
|
||||
void operator()(Type& current, const Type& added) const {
|
||||
current += added;
|
||||
if (current < base_type::identity_element()) {
|
||||
current = base_type::identity_element();
|
||||
}
|
||||
}
|
||||
|
||||
// public static functions
|
||||
static void version(Type&){};
|
||||
};
|
||||
|
||||
using OverlapCombine = counter_add_functor<int>;
|
||||
using OverlapSection = boost::icl::inter_section<int>;
|
||||
using OverlapCounter = boost::icl::split_interval_map<DAddr, int>;
|
||||
|
||||
struct OverlapResult {
|
||||
boost::container::small_vector<BufferId, 16> ids;
|
||||
DAddr begin;
|
||||
@ -240,6 +195,8 @@ class BufferCache : public VideoCommon::ChannelSetupCaches<BufferCacheChannelInf
|
||||
public:
|
||||
explicit BufferCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, Runtime& runtime_);
|
||||
|
||||
~BufferCache();
|
||||
|
||||
void TickFrame();
|
||||
|
||||
void WriteMemory(DAddr device_addr, u64 size);
|
||||
@ -379,75 +336,6 @@ private:
|
||||
}
|
||||
}
|
||||
|
||||
template <typename Func>
|
||||
void ForEachInRangeSet(IntervalSet& current_range, DAddr device_addr, u64 size, Func&& func) {
|
||||
const DAddr start_address = device_addr;
|
||||
const DAddr end_address = start_address + size;
|
||||
const IntervalType search_interval{start_address, end_address};
|
||||
auto it = current_range.lower_bound(search_interval);
|
||||
if (it == current_range.end()) {
|
||||
return;
|
||||
}
|
||||
auto end_it = current_range.upper_bound(search_interval);
|
||||
for (; it != end_it; it++) {
|
||||
DAddr inter_addr_end = it->upper();
|
||||
DAddr inter_addr = it->lower();
|
||||
if (inter_addr_end > end_address) {
|
||||
inter_addr_end = end_address;
|
||||
}
|
||||
if (inter_addr < start_address) {
|
||||
inter_addr = start_address;
|
||||
}
|
||||
func(inter_addr, inter_addr_end);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename Func>
|
||||
void ForEachInOverlapCounter(OverlapCounter& current_range, DAddr device_addr, u64 size,
|
||||
Func&& func) {
|
||||
const DAddr start_address = device_addr;
|
||||
const DAddr end_address = start_address + size;
|
||||
const IntervalType search_interval{start_address, end_address};
|
||||
auto it = current_range.lower_bound(search_interval);
|
||||
if (it == current_range.end()) {
|
||||
return;
|
||||
}
|
||||
auto end_it = current_range.upper_bound(search_interval);
|
||||
for (; it != end_it; it++) {
|
||||
auto& inter = it->first;
|
||||
DAddr inter_addr_end = inter.upper();
|
||||
DAddr inter_addr = inter.lower();
|
||||
if (inter_addr_end > end_address) {
|
||||
inter_addr_end = end_address;
|
||||
}
|
||||
if (inter_addr < start_address) {
|
||||
inter_addr = start_address;
|
||||
}
|
||||
func(inter_addr, inter_addr_end, it->second);
|
||||
}
|
||||
}
|
||||
|
||||
void RemoveEachInOverlapCounter(OverlapCounter& current_range,
|
||||
const IntervalType search_interval, int subtract_value) {
|
||||
bool any_removals = false;
|
||||
current_range.add(std::make_pair(search_interval, subtract_value));
|
||||
do {
|
||||
any_removals = false;
|
||||
auto it = current_range.lower_bound(search_interval);
|
||||
if (it == current_range.end()) {
|
||||
return;
|
||||
}
|
||||
auto end_it = current_range.upper_bound(search_interval);
|
||||
for (; it != end_it; it++) {
|
||||
if (it->second <= 0) {
|
||||
any_removals = true;
|
||||
current_range.erase(it);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} while (any_removals);
|
||||
}
|
||||
|
||||
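The removed helpers above all share one step: clamp each stored interval to the queried window before handing it to the callback. That clamping in isolation (std::max/std::min from &lt;algorithm&gt;):

// Clamp [lower, upper) to the query window [start, end) and call func if anything remains.
template <typename Func>
void InvokeClamped(DAddr lower, DAddr upper, DAddr start, DAddr end, Func&& func) {
    const DAddr clamped_begin = std::max(lower, start);
    const DAddr clamped_end = std::min(upper, end);
    if (clamped_begin < clamped_end) {
        func(clamped_begin, clamped_end);
    }
}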
static bool IsRangeGranular(DAddr device_addr, size_t size) {
|
||||
return (device_addr & ~Core::DEVICE_PAGEMASK) ==
|
||||
((device_addr + size) & ~Core::DEVICE_PAGEMASK);
|
||||
@ -552,14 +440,14 @@ private:
|
||||
|
||||
[[nodiscard]] bool HasFastUniformBufferBound(size_t stage, u32 binding_index) const noexcept;
|
||||
|
||||
void ClearDownload(IntervalType subtract_interval);
|
||||
void ClearDownload(DAddr base_addr, u64 size);
|
||||
|
||||
void InlineMemoryImplementation(DAddr dest_address, size_t copy_size,
|
||||
std::span<const u8> inlined_buffer);
|
||||
|
||||
Tegra::MaxwellDeviceMemoryManager& device_memory;
|
||||
|
||||
SlotVector<Buffer> slot_buffers;
|
||||
Common::SlotVector<Buffer> slot_buffers;
|
||||
DelayedDestructionRing<Buffer, 8> delayed_destruction_ring;
|
||||
|
||||
const Tegra::Engines::DrawManager::IndirectParams* current_draw_indirect{};
|
||||
@ -567,13 +455,12 @@ private:
|
||||
u32 last_index_count = 0;
|
||||
|
||||
MemoryTracker memory_tracker;
|
||||
IntervalSet uncommitted_ranges;
|
||||
IntervalSet common_ranges;
|
||||
IntervalSet cached_ranges;
|
||||
std::deque<IntervalSet> committed_ranges;
|
||||
Common::RangeSet<DAddr> uncommitted_gpu_modified_ranges;
|
||||
Common::RangeSet<DAddr> gpu_modified_ranges;
|
||||
std::deque<Common::RangeSet<DAddr>> committed_gpu_modified_ranges;
|
||||
|
||||
// Async Buffers
|
||||
OverlapCounter async_downloads;
|
||||
Common::OverlapRangeSet<DAddr> async_downloads;
|
||||
std::deque<std::optional<Async_Buffer>> async_buffers;
|
||||
std::deque<boost::container::small_vector<BufferCopy, 4>> pending_downloads;
|
||||
std::optional<Async_Buffer> current_buffer;
|
||||
|
36
src/video_core/capture.h
Normal file
@ -0,0 +1,36 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2024 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/bit_util.h"
|
||||
#include "common/common_types.h"
|
||||
#include "core/frontend/framebuffer_layout.h"
|
||||
#include "video_core/surface.h"
|
||||
|
||||
namespace VideoCore::Capture {
|
||||
|
||||
constexpr u32 BlockHeight = 4;
|
||||
constexpr u32 BlockDepth = 0;
|
||||
constexpr u32 BppLog2 = 2;
|
||||
|
||||
constexpr auto PixelFormat = Surface::PixelFormat::B8G8R8A8_UNORM;
|
||||
|
||||
constexpr auto LinearWidth = Layout::ScreenUndocked::Width;
|
||||
constexpr auto LinearHeight = Layout::ScreenUndocked::Height;
|
||||
constexpr auto LinearDepth = 1U;
|
||||
constexpr auto BytesPerPixel = 4U;
|
||||
|
||||
constexpr auto TiledWidth = LinearWidth;
|
||||
constexpr auto TiledHeight = Common::AlignUpLog2(LinearHeight, BlockHeight + BlockDepth + BppLog2);
|
||||
constexpr auto TiledSize = TiledWidth * TiledHeight * (1 << BppLog2);
|
||||
|
||||
constexpr Layout::FramebufferLayout Layout{
|
||||
.width = LinearWidth,
|
||||
.height = LinearHeight,
|
||||
.screen = {0, 0, LinearWidth, LinearHeight},
|
||||
.is_srgb = false,
|
||||
};
|
||||
|
||||
} // namespace VideoCore::Capture
|
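The capture constants above derive the tiled staging size from the undocked screen layout. Assuming Layout::ScreenUndocked is the usual 1280x720, the arithmetic works out as follows:

// AlignUpLog2(720, 4 + 0 + 2) rounds 720 up to the next multiple of 64 (2^6), giving 768.
constexpr u32 ExampleTiledWidth = 1280;
constexpr u32 ExampleTiledHeight = 768;
constexpr u32 ExampleBytesPerPixel = 1U << 2; // BppLog2 = 2, i.e. 4 bytes for B8G8R8A8
constexpr u32 ExampleTiledSize = ExampleTiledWidth * ExampleTiledHeight * ExampleBytesPerPixel;
static_assert(ExampleTiledSize == 3932160); // about 3.75 MiB per captured frame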
@ -2,136 +2,130 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
#include <bit>
|
||||
|
||||
#include "common/thread.h"
|
||||
#include "core/core.h"
|
||||
#include "video_core/cdma_pusher.h"
|
||||
#include "video_core/engines/maxwell_3d.h"
|
||||
#include "video_core/host1x/control.h"
|
||||
#include "video_core/host1x/host1x.h"
|
||||
#include "video_core/host1x/nvdec.h"
|
||||
#include "video_core/host1x/nvdec_common.h"
|
||||
#include "video_core/host1x/sync_manager.h"
|
||||
#include "video_core/host1x/vic.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
|
||||
namespace Tegra {
|
||||
CDmaPusher::CDmaPusher(Host1x::Host1x& host1x_)
|
||||
: host1x{host1x_}, nvdec_processor(std::make_shared<Host1x::Nvdec>(host1x)),
|
||||
vic_processor(std::make_unique<Host1x::Vic>(host1x, nvdec_processor)),
|
||||
host1x_processor(std::make_unique<Host1x::Control>(host1x)),
|
||||
sync_manager(std::make_unique<Host1x::SyncptIncrManager>(host1x)) {}
|
||||
|
||||
CDmaPusher::CDmaPusher(Host1x::Host1x& host1x_, s32 id)
|
||||
: host1x{host1x_}, memory_manager{host1x.GMMU()},
|
||||
host_processor{std::make_unique<Host1x::Control>(host1x_)}, current_class{
|
||||
static_cast<ChClassId>(id)} {
|
||||
thread = std::jthread([this](std::stop_token stop_token) { ProcessEntries(stop_token); });
|
||||
}
|
||||
|
||||
CDmaPusher::~CDmaPusher() = default;
|
||||
|
||||
void CDmaPusher::ProcessEntries(ChCommandHeaderList&& entries) {
|
||||
for (const auto& value : entries) {
|
||||
if (mask != 0) {
|
||||
const auto lbs = static_cast<u32>(std::countr_zero(mask));
|
||||
mask &= ~(1U << lbs);
|
||||
ExecuteCommand(offset + lbs, value.raw);
|
||||
continue;
|
||||
} else if (count != 0) {
|
||||
--count;
|
||||
ExecuteCommand(offset, value.raw);
|
||||
if (incrementing) {
|
||||
++offset;
|
||||
void CDmaPusher::ProcessEntries(std::stop_token stop_token) {
|
||||
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
|
||||
ChCommandHeaderList command_list{host1x.System().ApplicationMemory(), 0, 0};
|
||||
u32 count{};
|
||||
u32 method_offset{};
|
||||
u32 mask{};
|
||||
bool incrementing{};
|
||||
|
||||
while (!stop_token.stop_requested()) {
|
||||
{
|
||||
std::unique_lock l{command_mutex};
|
||||
Common::CondvarWait(command_cv, l, stop_token,
|
||||
[this]() { return command_lists.size() > 0; });
|
||||
if (stop_token.stop_requested()) {
|
||||
return;
|
||||
}
|
||||
continue;
|
||||
|
||||
command_list = std::move(command_lists.front());
|
||||
command_lists.pop_front();
|
||||
}
|
||||
const auto mode = value.submission_mode.Value();
|
||||
switch (mode) {
|
||||
case ChSubmissionMode::SetClass: {
|
||||
mask = value.value & 0x3f;
|
||||
offset = value.method_offset;
|
||||
current_class = static_cast<ChClassId>((value.value >> 6) & 0x3ff);
|
||||
break;
|
||||
}
|
||||
case ChSubmissionMode::Incrementing:
|
||||
case ChSubmissionMode::NonIncrementing:
|
||||
count = value.value;
|
||||
offset = value.method_offset;
|
||||
incrementing = mode == ChSubmissionMode::Incrementing;
|
||||
break;
|
||||
case ChSubmissionMode::Mask:
|
||||
mask = value.value;
|
||||
offset = value.method_offset;
|
||||
break;
|
||||
case ChSubmissionMode::Immediate: {
|
||||
const u32 data = value.value & 0xfff;
|
||||
offset = value.method_offset;
|
||||
ExecuteCommand(offset, data);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
UNIMPLEMENTED_MSG("ChSubmission mode {} is not implemented!", static_cast<u32>(mode));
|
||||
break;
|
||||
|
||||
size_t i = 0;
|
||||
for (const auto value : command_list) {
|
||||
i++;
|
||||
if (mask != 0) {
|
||||
const auto lbs = static_cast<u32>(std::countr_zero(mask));
|
||||
mask &= ~(1U << lbs);
|
||||
ExecuteCommand(method_offset + lbs, value.raw);
|
||||
continue;
|
||||
} else if (count != 0) {
|
||||
--count;
|
||||
ExecuteCommand(method_offset, value.raw);
|
||||
if (incrementing) {
|
||||
++method_offset;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
const auto mode = value.submission_mode.Value();
|
||||
switch (mode) {
|
||||
case ChSubmissionMode::SetClass: {
|
||||
mask = value.value & 0x3f;
|
||||
method_offset = value.method_offset;
|
||||
current_class = static_cast<ChClassId>((value.value >> 6) & 0x3ff);
|
||||
break;
|
||||
}
|
||||
case ChSubmissionMode::Incrementing:
|
||||
case ChSubmissionMode::NonIncrementing:
|
||||
count = value.value;
|
||||
method_offset = value.method_offset;
|
||||
incrementing = mode == ChSubmissionMode::Incrementing;
|
||||
break;
|
||||
case ChSubmissionMode::Mask:
|
||||
mask = value.value;
|
||||
method_offset = value.method_offset;
|
||||
break;
|
||||
case ChSubmissionMode::Immediate: {
|
||||
const u32 data = value.value & 0xfff;
|
||||
method_offset = value.method_offset;
|
||||
ExecuteCommand(method_offset, data);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
LOG_ERROR(HW_GPU, "Bad command at index {} (bytes 0x{:X}), buffer size {}", i - 1,
|
||||
(i - 1) * sizeof(u32), command_list.size());
|
||||
UNIMPLEMENTED_MSG("ChSubmission mode {} is not implemented!",
|
||||
static_cast<u32>(mode));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
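ProcessEntries is a small state machine over the channel stream: a SetClass or Mask header arms a bitmask of method slots, and each following word is written to the lowest still-set bit. The mask-consumption step on its own, using the same std::countr_zero trick as the loop above (the mask must be non-zero when called):

// Hypothetical helper: pop the lowest set bit of `mask` and return the method slot it selects.
inline u32 NextMaskedMethod(u32& mask, u32 base_offset) {
    const u32 lowest_bit = static_cast<u32>(std::countr_zero(mask)); // index of the lowest set bit
    mask &= ~(1U << lowest_bit);                                     // consume it
    return base_offset + lowest_bit;                                 // method register to write next
}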
void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
|
||||
void CDmaPusher::ExecuteCommand(u32 method, u32 arg) {
|
||||
switch (current_class) {
|
||||
case ChClassId::NvDec:
|
||||
ThiStateWrite(nvdec_thi_state, offset, data);
|
||||
switch (static_cast<ThiMethod>(offset)) {
|
||||
case ThiMethod::IncSyncpt: {
|
||||
LOG_DEBUG(Service_NVDRV, "NVDEC Class IncSyncpt Method");
|
||||
const auto syncpoint_id = static_cast<u32>(data & 0xFF);
|
||||
const auto cond = static_cast<u32>((data >> 8) & 0xFF);
|
||||
if (cond == 0) {
|
||||
sync_manager->Increment(syncpoint_id);
|
||||
} else {
|
||||
sync_manager->SignalDone(
|
||||
sync_manager->IncrementWhenDone(static_cast<u32>(current_class), syncpoint_id));
|
||||
}
|
||||
break;
|
||||
}
|
||||
case ThiMethod::SetMethod1:
|
||||
LOG_DEBUG(Service_NVDRV, "NVDEC method 0x{:X}",
|
||||
static_cast<u32>(nvdec_thi_state.method_0));
|
||||
nvdec_processor->ProcessMethod(nvdec_thi_state.method_0, data);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case ChClassId::GraphicsVic:
|
||||
ThiStateWrite(vic_thi_state, static_cast<u32>(state_offset), {data});
|
||||
switch (static_cast<ThiMethod>(state_offset)) {
|
||||
case ThiMethod::IncSyncpt: {
|
||||
LOG_DEBUG(Service_NVDRV, "VIC Class IncSyncpt Method");
|
||||
const auto syncpoint_id = static_cast<u32>(data & 0xFF);
|
||||
const auto cond = static_cast<u32>((data >> 8) & 0xFF);
|
||||
if (cond == 0) {
|
||||
sync_manager->Increment(syncpoint_id);
|
||||
} else {
|
||||
sync_manager->SignalDone(
|
||||
sync_manager->IncrementWhenDone(static_cast<u32>(current_class), syncpoint_id));
|
||||
}
|
||||
break;
|
||||
}
|
||||
case ThiMethod::SetMethod1:
|
||||
LOG_DEBUG(Service_NVDRV, "VIC method 0x{:X}, Args=({})",
|
||||
static_cast<u32>(vic_thi_state.method_0), data);
|
||||
vic_processor->ProcessMethod(static_cast<Host1x::Vic::Method>(vic_thi_state.method_0),
|
||||
data);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case ChClassId::Control:
|
||||
// This device is mainly for syncpoint synchronization
|
||||
LOG_DEBUG(Service_NVDRV, "Host1X Class Method");
|
||||
host1x_processor->ProcessMethod(static_cast<Host1x::Control::Method>(offset), data);
|
||||
LOG_TRACE(Service_NVDRV, "Class {} method 0x{:X} arg 0x{:X}",
|
||||
static_cast<u32>(current_class), method, arg);
|
||||
host_processor->ProcessMethod(static_cast<Host1x::Control::Method>(method), arg);
|
||||
break;
|
||||
default:
|
||||
UNIMPLEMENTED_MSG("Current class not implemented {:X}", static_cast<u32>(current_class));
|
||||
break;
|
||||
thi_regs.reg_array[method] = arg;
|
||||
switch (static_cast<ThiMethod>(method)) {
|
||||
case ThiMethod::IncSyncpt: {
|
||||
const auto syncpoint_id = static_cast<u32>(arg & 0xFF);
|
||||
[[maybe_unused]] const auto cond = static_cast<u32>((arg >> 8) & 0xFF);
|
||||
LOG_TRACE(Service_NVDRV, "Class {} IncSyncpt Method, syncpt {} cond {}",
|
||||
static_cast<u32>(current_class), syncpoint_id, cond);
|
||||
auto& syncpoint_manager = host1x.GetSyncpointManager();
|
||||
syncpoint_manager.IncrementGuest(syncpoint_id);
|
||||
syncpoint_manager.IncrementHost(syncpoint_id);
|
||||
break;
|
||||
}
|
||||
case ThiMethod::SetMethod1:
|
||||
LOG_TRACE(Service_NVDRV, "Class {} method 0x{:X} arg 0x{:X}",
|
||||
static_cast<u32>(current_class), static_cast<u32>(thi_regs.method_0), arg);
|
||||
ProcessMethod(thi_regs.method_0, arg);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void CDmaPusher::ThiStateWrite(ThiRegisters& state, u32 state_offset, u32 argument) {
|
||||
u8* const offset_ptr = reinterpret_cast<u8*>(&state) + sizeof(u32) * state_offset;
|
||||
std::memcpy(offset_ptr, &argument, sizeof(u32));
|
||||
}
|
||||
|
||||
} // namespace Tegra
|
||||
|
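In the reworked ExecuteCommand every method argument is stored into the THI register file first, and IncSyncpt bumps both the guest and host views of the syncpoint so the two counters stay in lockstep. That pairing as a sketch, using the SyncpointManager calls visible above:

// Hypothetical wrapper mirroring the IncSyncpt handling in ExecuteCommand.
void BumpSyncpoint(Tegra::Host1x::Host1x& host1x, u32 syncpoint_id) {
    auto& syncpoint_manager = host1x.GetSyncpointManager();
    syncpoint_manager.IncrementGuest(syncpoint_id); // value the guest polls
    syncpoint_manager.IncrementHost(syncpoint_id);  // value host-side waiters observe
}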
@ -3,12 +3,18 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <condition_variable>
|
||||
#include <deque>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/polyfill_thread.h"
|
||||
#include "core/memory.h"
|
||||
|
||||
namespace Tegra {
|
||||
|
||||
@ -62,23 +68,31 @@ struct ChCommand {
|
||||
std::vector<u32> arguments;
|
||||
};
|
||||
|
||||
using ChCommandHeaderList = std::vector<ChCommandHeader>;
|
||||
using ChCommandHeaderList =
|
||||
Core::Memory::CpuGuestMemory<Tegra::ChCommandHeader, Core::Memory::GuestMemoryFlags::SafeRead>;
|
||||
|
||||
struct ThiRegisters {
|
||||
u32_le increment_syncpt{};
|
||||
INSERT_PADDING_WORDS(1);
|
||||
u32_le increment_syncpt_error{};
|
||||
u32_le ctx_switch_incremement_syncpt{};
|
||||
INSERT_PADDING_WORDS(4);
|
||||
u32_le ctx_switch{};
|
||||
INSERT_PADDING_WORDS(1);
|
||||
u32_le ctx_syncpt_eof{};
|
||||
INSERT_PADDING_WORDS(5);
|
||||
u32_le method_0{};
|
||||
u32_le method_1{};
|
||||
INSERT_PADDING_WORDS(12);
|
||||
u32_le int_status{};
|
||||
u32_le int_mask{};
|
||||
static constexpr std::size_t NUM_REGS = 0x20;
|
||||
|
||||
union {
|
||||
struct {
|
||||
u32_le increment_syncpt;
|
||||
INSERT_PADDING_WORDS_NOINIT(1);
|
||||
u32_le increment_syncpt_error;
|
||||
u32_le ctx_switch_incremement_syncpt;
|
||||
INSERT_PADDING_WORDS_NOINIT(4);
|
||||
u32_le ctx_switch;
|
||||
INSERT_PADDING_WORDS_NOINIT(1);
|
||||
u32_le ctx_syncpt_eof;
|
||||
INSERT_PADDING_WORDS_NOINIT(5);
|
||||
u32_le method_0;
|
||||
u32_le method_1;
|
||||
INSERT_PADDING_WORDS_NOINIT(12);
|
||||
u32_le int_status;
|
||||
u32_le int_mask;
|
||||
};
|
||||
std::array<u32, NUM_REGS> reg_array;
|
||||
};
|
||||
};
|
||||
|
||||
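The new ThiRegisters layout overlays the named registers on reg_array, which is what lets ExecuteCommand store any argument by raw index and read it back through a named field. Going by the padding shown above, method_0 sits at word 16, so a usage sketch mirroring what ExecuteCommand does would be:

// Writes land in the raw array; the named field aliases the same storage
// (word 16 is assumed from the padding layout above).
Tegra::ThiRegisters thi_regs{};
thi_regs.reg_array[16] = 0xCAFE;      // raw store, as ExecuteCommand does for every method
const u32 method = thi_regs.method_0; // reads back 0xCAFE through the union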
enum class ThiMethod : u32 {
|
||||
@ -89,32 +103,39 @@ enum class ThiMethod : u32 {
|
||||
|
||||
class CDmaPusher {
|
||||
public:
|
||||
explicit CDmaPusher(Host1x::Host1x& host1x);
|
||||
~CDmaPusher();
|
||||
CDmaPusher() = delete;
|
||||
virtual ~CDmaPusher();
|
||||
|
||||
/// Process the command entry
|
||||
void ProcessEntries(ChCommandHeaderList&& entries);
|
||||
void PushEntries(ChCommandHeaderList&& entries) {
|
||||
std::scoped_lock l{command_mutex};
|
||||
command_lists.push_back(std::move(entries));
|
||||
command_cv.notify_one();
|
||||
}
|
||||
|
||||
protected:
|
||||
explicit CDmaPusher(Host1x::Host1x& host1x, s32 id);
|
||||
|
||||
virtual void ProcessMethod(u32 method, u32 arg) = 0;
|
||||
|
||||
Host1x::Host1x& host1x;
|
||||
Tegra::MemoryManager& memory_manager;
|
||||
|
||||
private:
|
||||
/// Process the command entry
|
||||
void ProcessEntries(std::stop_token stop_token);
|
||||
|
||||
/// Invoke command class devices to execute the command based on the current state
|
||||
void ExecuteCommand(u32 state_offset, u32 data);
|
||||
|
||||
/// Write arguments value to the ThiRegisters member at the specified offset
|
||||
void ThiStateWrite(ThiRegisters& state, u32 offset, u32 argument);
|
||||
std::unique_ptr<Host1x::Control> host_processor;
|
||||
|
||||
Host1x::Host1x& host1x;
|
||||
std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor;
|
||||
std::unique_ptr<Tegra::Host1x::Vic> vic_processor;
|
||||
std::unique_ptr<Tegra::Host1x::Control> host1x_processor;
|
||||
std::unique_ptr<Host1x::SyncptIncrManager> sync_manager;
|
||||
ChClassId current_class{};
|
||||
ThiRegisters vic_thi_state{};
|
||||
ThiRegisters nvdec_thi_state{};
|
||||
std::mutex command_mutex;
|
||||
std::condition_variable_any command_cv;
|
||||
std::deque<ChCommandHeaderList> command_lists;
|
||||
std::jthread thread;
|
||||
|
||||
u32 count{};
|
||||
u32 offset{};
|
||||
u32 mask{};
|
||||
bool incrementing{};
|
||||
ThiRegisters thi_regs{};
|
||||
ChClassId current_class;
|
||||
};
|
||||
|
||||
} // namespace Tegra
|
||||
|
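CDmaPusher now owns a worker thread: PushEntries only appends to command_lists under command_mutex and signals command_cv, and ProcessEntries drains the deque on the jthread. The handoff pattern in isolation, stripped of the Host1x types (needs &lt;condition_variable&gt;, &lt;deque&gt;, &lt;mutex&gt;, &lt;vector&gt;):

// Generic single-producer handoff mirroring PushEntries (sketch).
std::mutex queue_mutex;
std::condition_variable_any queue_cv;
std::deque<std::vector<u32>> queued_work;

void PushWork(std::vector<u32>&& work) {
    std::scoped_lock lock{queue_mutex};
    queued_work.push_back(std::move(work));
    queue_cv.notify_one(); // wake the consumer, which waits with a stop_token-aware CondvarWait
}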
@ -13,20 +13,102 @@ Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {}
|
||||
|
||||
Scheduler::~Scheduler() = default;
|
||||
|
||||
void Scheduler::Init() {
|
||||
master_control = Common::Fiber::ThreadToFiber();
|
||||
}
|
||||
|
||||
void Scheduler::Resume() {
|
||||
bool nothing_pending;
|
||||
do {
|
||||
nothing_pending = true;
|
||||
current_fifo = nullptr;
|
||||
{
|
||||
std::unique_lock lk(scheduling_guard);
|
||||
size_t num_iters = gpfifos.size();
|
||||
for (size_t i = 0; i < num_iters; i++) {
|
||||
size_t current_id = (current_fifo_rotation_id + i) % gpfifos.size();
|
||||
auto& fifo = gpfifos[current_id];
|
||||
if (!fifo.is_active) {
|
||||
continue;
|
||||
}
|
||||
std::scoped_lock lk2(fifo.guard);
|
||||
if (!fifo.pending_work.empty() || fifo.working.load(std::memory_order_acquire)) {
|
||||
current_fifo = &fifo;
|
||||
current_fifo_rotation_id = current_id;
|
||||
nothing_pending = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (current_fifo) {
|
||||
Common::Fiber::YieldTo(master_control, *current_fifo->context);
|
||||
current_fifo = nullptr;
|
||||
}
|
||||
} while (!nothing_pending);
|
||||
}
|
||||
|
||||
void Scheduler::Yield() {
|
||||
ASSERT(current_fifo != nullptr);
|
||||
Common::Fiber::YieldTo(current_fifo->context, *master_control);
|
||||
gpu.BindChannel(current_fifo->bind_id);
|
||||
}
|
||||
|
||||
void Scheduler::Push(s32 channel, CommandList&& entries) {
|
||||
std::unique_lock lk(scheduling_guard);
|
||||
auto it = channels.find(channel);
|
||||
ASSERT(it != channels.end());
|
||||
auto channel_state = it->second;
|
||||
gpu.BindChannel(channel_state->bind_id);
|
||||
channel_state->dma_pusher->Push(std::move(entries));
|
||||
channel_state->dma_pusher->DispatchCalls();
|
||||
auto it = channel_gpfifo_ids.find(channel);
|
||||
ASSERT(it != channel_gpfifo_ids.end());
|
||||
auto gpfifo_id = it->second;
|
||||
auto& fifo = gpfifos[gpfifo_id];
|
||||
{
|
||||
std::scoped_lock lk2(fifo.guard);
|
||||
fifo.pending_work.emplace_back(std::move(entries));
|
||||
}
|
||||
}
|
||||
|
||||
void Scheduler::ChannelLoop(size_t gpfifo_id, s32 channel_id) {
|
||||
gpu.BindChannel(channel_id);
|
||||
auto& fifo = gpfifos[gpfifo_id];
|
||||
while (true) {
|
||||
auto* channel_state = channels[channel_id].get();
|
||||
fifo.guard.lock();
|
||||
while (!fifo.pending_work.empty()) {
|
||||
{
|
||||
|
||||
fifo.working.store(true, std::memory_order_release);
|
||||
CommandList&& entries = std::move(fifo.pending_work.front());
|
||||
channel_state->dma_pusher->Push(std::move(entries));
|
||||
fifo.pending_work.pop_front();
|
||||
}
|
||||
fifo.guard.unlock();
|
||||
channel_state->dma_pusher->DispatchCalls();
|
||||
fifo.guard.lock();
|
||||
}
|
||||
fifo.working.store(false, std::memory_order_relaxed);
|
||||
fifo.guard.unlock();
|
||||
Common::Fiber::YieldTo(fifo.context, *master_control);
|
||||
gpu.BindChannel(channel_id);
|
||||
}
|
||||
}
|
||||
|
||||
void Scheduler::DeclareChannel(std::shared_ptr<ChannelState> new_channel) {
|
||||
s32 channel = new_channel->bind_id;
|
||||
std::unique_lock lk(scheduling_guard);
|
||||
channels.emplace(channel, new_channel);
|
||||
size_t new_fifo_id;
|
||||
if (!free_fifos.empty()) {
|
||||
new_fifo_id = free_fifos.front();
|
||||
free_fifos.pop_front();
|
||||
} else {
|
||||
new_fifo_id = gpfifos.size();
|
||||
gpfifos.emplace_back();
|
||||
}
|
||||
auto& new_fifo = gpfifos[new_fifo_id];
|
||||
channel_gpfifo_ids[channel] = new_fifo_id;
|
||||
new_fifo.is_active = true;
|
||||
new_fifo.bind_id = channel;
|
||||
new_fifo.pending_work.clear();
|
||||
std::function<void()> callback = std::bind(&Scheduler::ChannelLoop, this, new_fifo_id, channel);
|
||||
new_fifo.context = std::make_shared<Common::Fiber>(std::move(callback));
|
||||
}
|
||||
|
||||
} // namespace Tegra::Control
|
||||
|
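Resume walks the gpfifo deque starting from the last serviced entry and wrapping around, so channels are picked round-robin rather than always from index zero. The selection logic in isolation, with each fifo's state abstracted to a has_work flag (needs &lt;optional&gt; and &lt;vector&gt;):

// Sketch of the scan in Resume(): one pass over all fifos, starting at the last one that ran.
std::optional<size_t> PickNextFifo(const std::vector<bool>& has_work, size_t last_serviced) {
    const size_t count = has_work.size();
    for (size_t i = 0; i < count; ++i) {
        const size_t candidate = (last_serviced + i) % count;
        if (has_work[candidate]) {
            return candidate; // Resume() yields to this fifo's fiber next
        }
    }
    return std::nullopt; // nothing pending; Resume() falls out of its do/while loop
}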
@ -3,10 +3,13 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <deque>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "common/fiber.h"
|
||||
#include "video_core/dma_pusher.h"
|
||||
|
||||
namespace Tegra {
|
||||
@ -22,14 +25,36 @@ public:
|
||||
explicit Scheduler(GPU& gpu_);
|
||||
~Scheduler();
|
||||
|
||||
void Init();
|
||||
|
||||
void Resume();
|
||||
|
||||
void Yield();
|
||||
|
||||
void Push(s32 channel, CommandList&& entries);
|
||||
|
||||
void DeclareChannel(std::shared_ptr<ChannelState> new_channel);
|
||||
|
||||
private:
|
||||
void ChannelLoop(size_t gpfifo_id, s32 channel_id);
|
||||
|
||||
std::unordered_map<s32, std::shared_ptr<ChannelState>> channels;
|
||||
std::unordered_map<s32, size_t> channel_gpfifo_ids;
|
||||
std::mutex scheduling_guard;
|
||||
std::shared_ptr<Common::Fiber> master_control;
|
||||
struct GPFifoContext {
|
||||
bool is_active;
|
||||
std::shared_ptr<Common::Fiber> context;
|
||||
std::deque<CommandList> pending_work;
|
||||
std::atomic<bool> working{};
|
||||
std::mutex guard;
|
||||
s32 bind_id;
|
||||
};
|
||||
std::deque<GPFifoContext> gpfifos;
|
||||
std::deque<size_t> free_fifos;
|
||||
GPU& gpu;
|
||||
size_t current_fifo_rotation_id{};
|
||||
GPFifoContext* current_fifo{};
|
||||
};
|
||||
|
||||
} // namespace Control
|
||||
|
@ -6,6 +6,7 @@
|
||||
#include "common/settings.h"
|
||||
#include "core/core.h"
|
||||
#include "video_core/control/channel_state.h"
|
||||
#include "video_core/control/scheduler.h"
|
||||
#include "video_core/dma_pusher.h"
|
||||
#include "video_core/engines/fermi_2d.h"
|
||||
#include "video_core/engines/kepler_compute.h"
|
||||
@ -14,6 +15,8 @@
|
||||
#include "video_core/engines/maxwell_dma.h"
|
||||
#include "video_core/engines/puller.h"
|
||||
#include "video_core/gpu.h"
|
||||
#include "video_core/host1x/host1x.h"
|
||||
#include "video_core/host1x/syncpoint_manager.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
#include "video_core/rasterizer_interface.h"
|
||||
|
||||
@ -60,11 +63,14 @@ void Puller::ProcessBindMethod(const MethodCall& method_call) {
|
||||
}
|
||||
|
||||
void Puller::ProcessFenceActionMethod() {
|
||||
auto& syncpoint_manager = gpu.Host1x().GetSyncpointManager();
|
||||
switch (regs.fence_action.op) {
|
||||
case Puller::FenceOperation::Acquire:
|
||||
// UNIMPLEMENTED_MSG("Channel Scheduling pending.");
|
||||
// WaitFence(regs.fence_action.syncpoint_id, regs.fence_value);
|
||||
rasterizer->ReleaseFences();
|
||||
while (regs.fence_value >
|
||||
syncpoint_manager.GetGuestSyncpointValue(regs.fence_action.syncpoint_id)) {
|
||||
rasterizer->ReleaseFences();
|
||||
gpu.Scheduler().Yield();
|
||||
}
|
||||
break;
|
||||
case Puller::FenceOperation::Increment:
|
||||
rasterizer->SignalSyncPoint(regs.fence_action.syncpoint_id);
|
||||
|
@ -11,6 +11,12 @@
|
||||
|
||||
namespace Tegra {
|
||||
|
||||
enum class BlendMode {
|
||||
Opaque,
|
||||
Premultiplied,
|
||||
Coverage,
|
||||
};
|
||||
|
||||
/**
|
||||
* Struct describing framebuffer configuration
|
||||
*/
|
||||
@ -23,6 +29,7 @@ struct FramebufferConfig {
|
||||
Service::android::PixelFormat pixel_format{};
|
||||
Service::android::BufferTransformFlags transform_flags{};
|
||||
Common::Rectangle<int> crop_rect{};
|
||||
BlendMode blending{};
|
||||
};
|
||||
|
||||
Common::Rectangle<f32> NormalizeCrop(const FramebufferConfig& framebuffer, u32 texture_width,
|
||||
|
@ -250,30 +250,6 @@ struct GPU::Impl {
|
||||
gpu_thread.SubmitList(channel, std::move(entries));
|
||||
}
|
||||
|
||||
/// Push GPU command buffer entries to be processed
|
||||
void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) {
|
||||
if (!use_nvdec) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!cdma_pushers.contains(id)) {
|
||||
cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(host1x));
|
||||
}
|
||||
|
||||
// SubmitCommandBuffer would make the nvdec operations async, this is not currently working
|
||||
// TODO(ameerj): RE proper async nvdec operation
|
||||
// gpu_thread.SubmitCommandBuffer(std::move(entries));
|
||||
cdma_pushers[id]->ProcessEntries(std::move(entries));
|
||||
}
|
||||
|
||||
/// Frees the CDMAPusher instance to free up resources
|
||||
void ClearCdmaInstance(u32 id) {
|
||||
const auto iter = cdma_pushers.find(id);
|
||||
if (iter != cdma_pushers.end()) {
|
||||
cdma_pushers.erase(iter);
|
||||
}
|
||||
}
|
||||
|
||||
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
|
||||
void FlushRegion(DAddr addr, u64 size) {
|
||||
gpu_thread.FlushRegion(addr, size);
|
||||
@ -347,11 +323,21 @@ struct GPU::Impl {
|
||||
WaitForSyncOperation(wait_fence);
|
||||
}
|
||||
|
||||
std::vector<u8> GetAppletCaptureBuffer() {
|
||||
std::vector<u8> out;
|
||||
|
||||
const auto wait_fence =
|
||||
RequestSyncOperation([&] { out = renderer->GetAppletCaptureBuffer(); });
|
||||
gpu_thread.TickGPU();
|
||||
WaitForSyncOperation(wait_fence);
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
GPU& gpu;
|
||||
Core::System& system;
|
||||
Host1x::Host1x& host1x;
|
||||
|
||||
std::map<u32, std::unique_ptr<Tegra::CDmaPusher>> cdma_pushers;
|
||||
std::unique_ptr<VideoCore::RendererBase> renderer;
|
||||
VideoCore::RasterizerInterface* rasterizer = nullptr;
|
||||
const bool use_nvdec;
|
||||
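GetAppletCaptureBuffer shows the shape GPU::Impl uses whenever a caller needs a result produced on the GPU thread: queue a sync operation that fills the output, tick the GPU thread so it runs, then block on the returned fence. A hypothetical generic helper with the same shape, assuming the fence handle is simply the value returned by RequestSyncOperation:

// Sketch only: run `work` on the GPU thread and block until it has executed.
template <typename F>
void RunOnGpuThreadBlocking(GPU::Impl& impl, F&& work) {
    const auto wait_fence = impl.RequestSyncOperation(std::forward<F>(work));
    impl.gpu_thread.TickGPU();             // ensure the GPU thread services the request
    impl.WaitForSyncOperation(wait_fence); // block the caller until `work` is done
}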
@ -401,6 +387,14 @@ std::shared_ptr<Control::ChannelState> GPU::AllocateChannel() {
|
||||
return impl->AllocateChannel();
|
||||
}
|
||||
|
||||
Tegra::Control::Scheduler& GPU::Scheduler() {
|
||||
return *impl->scheduler;
|
||||
}
|
||||
|
||||
const Tegra::Control::Scheduler& GPU::Scheduler() const {
|
||||
return *impl->scheduler;
|
||||
}
|
||||
|
||||
void GPU::InitChannel(Control::ChannelState& to_init) {
|
||||
impl->InitChannel(to_init);
|
||||
}
|
||||
@ -505,6 +499,10 @@ void GPU::RequestComposite(std::vector<Tegra::FramebufferConfig>&& layers,
|
||||
impl->RequestComposite(std::move(layers), std::move(fences));
|
||||
}
|
||||
|
||||
std::vector<u8> GPU::GetAppletCaptureBuffer() {
|
||||
return impl->GetAppletCaptureBuffer();
|
||||
}
|
||||
|
||||
u64 GPU::GetTicks() const {
|
||||
return impl->GetTicks();
|
||||
}
|
||||
@ -541,14 +539,6 @@ void GPU::PushGPUEntries(s32 channel, Tegra::CommandList&& entries) {
|
||||
impl->PushGPUEntries(channel, std::move(entries));
|
||||
}
|
||||
|
||||
void GPU::PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) {
|
||||
impl->PushCommandBuffer(id, entries);
|
||||
}
|
||||
|
||||
void GPU::ClearCdmaInstance(u32 id) {
|
||||
impl->ClearCdmaInstance(id);
|
||||
}
|
||||
|
||||
VideoCore::RasterizerDownloadArea GPU::OnCPURead(PAddr addr, u64 size) {
|
||||
return impl->OnCPURead(addr, size);
|
||||
}
|
||||
|
@ -124,7 +124,8 @@ class KeplerCompute;
|
||||
|
||||
namespace Control {
|
||||
struct ChannelState;
|
||||
}
|
||||
class Scheduler;
|
||||
} // namespace Control
|
||||
|
||||
namespace Host1x {
|
||||
class Host1x;
|
||||
@ -204,6 +205,12 @@ public:
|
||||
/// Returns a const reference to the shader notifier.
|
||||
[[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const;
|
||||
|
||||
/// Returns GPU Channel Scheduler.
|
||||
[[nodiscard]] Tegra::Control::Scheduler& Scheduler();
|
||||
|
||||
/// Returns GPU Channel Scheduler.
|
||||
[[nodiscard]] const Tegra::Control::Scheduler& Scheduler() const;
|
||||
|
||||
[[nodiscard]] u64 GetTicks() const;
|
||||
|
||||
[[nodiscard]] bool IsAsync() const;
|
||||
@ -215,6 +222,8 @@ public:
|
||||
void RequestComposite(std::vector<Tegra::FramebufferConfig>&& layers,
|
||||
std::vector<Service::Nvidia::NvFence>&& fences);
|
||||
|
||||
std::vector<u8> GetAppletCaptureBuffer();
|
||||
|
||||
/// Performs any additional setup necessary in order to begin GPU emulation.
|
||||
/// This can be used to launch any necessary threads and register any necessary
|
||||
/// core timing events.
|
||||
@ -232,15 +241,6 @@ public:
|
||||
/// Push GPU command entries to be processed
|
||||
void PushGPUEntries(s32 channel, Tegra::CommandList&& entries);
|
||||
|
||||
/// Push GPU command buffer entries to be processed
|
||||
void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries);
|
||||
|
||||
/// Frees the CDMAPusher instance to free up resources
|
||||
void ClearCdmaInstance(u32 id);
|
||||
|
||||
/// Swap buffers (render frame)
|
||||
void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);
|
||||
|
||||
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
|
||||
[[nodiscard]] VideoCore::RasterizerDownloadArea OnCPURead(DAddr addr, u64 size);
|
||||
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include "video_core/dma_pusher.h"
|
||||
#include "video_core/gpu.h"
|
||||
#include "video_core/gpu_thread.h"
|
||||
#include "video_core/host1x/host1x.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
|
||||
namespace VideoCommon::GPUThread {
|
||||
@ -33,13 +34,15 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
|
||||
|
||||
CommandDataContainer next;
|
||||
|
||||
scheduler.Init();
|
||||
|
||||
while (!stop_token.stop_requested()) {
|
||||
state.queue.PopWait(next, stop_token);
|
||||
if (stop_token.stop_requested()) {
|
||||
break;
|
||||
}
|
||||
if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) {
|
||||
scheduler.Push(submit_list->channel, std::move(submit_list->entries));
|
||||
if (std::holds_alternative<SubmitListCommand>(next.data)) {
|
||||
scheduler.Resume();
|
||||
} else if (std::holds_alternative<GPUTickCommand>(next.data)) {
|
||||
system.GPU().TickWork();
|
||||
} else if (const auto* flush = std::get_if<FlushRegionCommand>(&next.data)) {
|
||||
@ -66,14 +69,16 @@ ThreadManager::~ThreadManager() = default;
|
||||
|
||||
void ThreadManager::StartThread(VideoCore::RendererBase& renderer,
|
||||
Core::Frontend::GraphicsContext& context,
|
||||
Tegra::Control::Scheduler& scheduler) {
|
||||
Tegra::Control::Scheduler& scheduler_) {
|
||||
rasterizer = renderer.ReadRasterizer();
|
||||
scheduler = &scheduler_;
|
||||
thread = std::jthread(RunThread, std::ref(system), std::ref(renderer), std::ref(context),
|
||||
std::ref(scheduler), std::ref(state));
|
||||
std::ref(scheduler_), std::ref(state));
|
||||
}
|
||||
|
||||
void ThreadManager::SubmitList(s32 channel, Tegra::CommandList&& entries) {
|
||||
PushCommand(SubmitListCommand(channel, std::move(entries)));
|
||||
scheduler->Push(channel, std::move(entries));
|
||||
PushCommand(SubmitListCommand());
|
||||
}
|
||||
|
||||
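After this change a submit does two things on the calling thread: the command list is pushed straight into the scheduler, and an empty SubmitListCommand is queued purely to wake the GPU thread, whose RunThread loop reacts by calling scheduler.Resume(). Roughly:

// Sketch of the new submit path; entries no longer travel through the command queue.
void SubmitSketch(Tegra::Control::Scheduler& scheduler, s32 channel, Tegra::CommandList&& entries,
                  auto&& push_wakeup) {
    scheduler.Push(channel, std::move(entries)); // enqueue on the channel's gpfifo
    push_wakeup(); // e.g. PushCommand(SubmitListCommand{}) just to unblock PopWait
}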
void ThreadManager::FlushRegion(DAddr addr, u64 size) {
|
||||
|
@ -36,13 +36,7 @@ class RendererBase;
|
||||
namespace VideoCommon::GPUThread {
|
||||
|
||||
/// Command to signal to the GPU thread that a command list is ready for processing
|
||||
struct SubmitListCommand final {
|
||||
explicit SubmitListCommand(s32 channel_, Tegra::CommandList&& entries_)
|
||||
: channel{channel_}, entries{std::move(entries_)} {}
|
||||
|
||||
s32 channel;
|
||||
Tegra::CommandList entries;
|
||||
};
|
||||
struct SubmitListCommand final {};
|
||||
|
||||
/// Command to signal to the GPU thread to flush a region
|
||||
struct FlushRegionCommand final {
|
||||
@ -124,6 +118,7 @@ public:
|
||||
private:
|
||||
/// Pushes a command to be executed by the GPU thread
|
||||
u64 PushCommand(CommandData&& command_data, bool block = false);
|
||||
Tegra::Control::Scheduler* scheduler;
|
||||
|
||||
Core::System& system;
|
||||
const bool is_async;
|
||||
|
@ -1,113 +0,0 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/settings.h"
|
||||
#include "video_core/host1x/codecs/codec.h"
|
||||
#include "video_core/host1x/codecs/h264.h"
|
||||
#include "video_core/host1x/codecs/vp8.h"
|
||||
#include "video_core/host1x/codecs/vp9.h"
|
||||
#include "video_core/host1x/host1x.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
|
||||
namespace Tegra {
|
||||
|
||||
Codec::Codec(Host1x::Host1x& host1x_, const Host1x::NvdecCommon::NvdecRegisters& regs)
|
||||
: host1x(host1x_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(host1x)),
|
||||
vp8_decoder(std::make_unique<Decoder::VP8>(host1x)),
|
||||
vp9_decoder(std::make_unique<Decoder::VP9>(host1x)) {}
|
||||
|
||||
Codec::~Codec() = default;
|
||||
|
||||
void Codec::Initialize() {
|
||||
initialized = decode_api.Initialize(current_codec);
|
||||
}
|
||||
|
||||
void Codec::SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec) {
|
||||
if (current_codec != codec) {
|
||||
current_codec = codec;
|
||||
LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", GetCurrentCodecName());
|
||||
}
|
||||
}
|
||||
|
||||
void Codec::Decode() {
|
||||
const bool is_first_frame = !initialized;
|
||||
if (is_first_frame) {
|
||||
Initialize();
|
||||
}
|
||||
|
||||
if (!initialized) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Assemble bitstream.
|
||||
bool vp9_hidden_frame = false;
|
||||
size_t configuration_size = 0;
|
||||
const auto packet_data = [&]() {
|
||||
switch (current_codec) {
|
||||
case Tegra::Host1x::NvdecCommon::VideoCodec::H264:
|
||||
return h264_decoder->ComposeFrame(state, &configuration_size, is_first_frame);
|
||||
case Tegra::Host1x::NvdecCommon::VideoCodec::VP8:
|
||||
return vp8_decoder->ComposeFrame(state);
|
||||
case Tegra::Host1x::NvdecCommon::VideoCodec::VP9:
|
||||
vp9_decoder->ComposeFrame(state);
|
||||
vp9_hidden_frame = vp9_decoder->WasFrameHidden();
|
||||
return vp9_decoder->GetFrameBytes();
|
||||
default:
|
||||
ASSERT(false);
|
||||
return std::span<const u8>{};
|
||||
}
|
||||
}();
|
||||
|
||||
// Send assembled bitstream to decoder.
|
||||
if (!decode_api.SendPacket(packet_data, configuration_size)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Only receive/store visible frames.
|
||||
if (vp9_hidden_frame) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Receive output frames from decoder.
|
||||
decode_api.ReceiveFrames(frames);
|
||||
|
||||
while (frames.size() > 10) {
|
||||
LOG_DEBUG(HW_GPU, "ReceiveFrames overflow, dropped frame");
|
||||
frames.pop();
|
||||
}
|
||||
}
|
||||
|
||||
std::unique_ptr<FFmpeg::Frame> Codec::GetCurrentFrame() {
|
||||
// Sometimes VIC will request more frames than have been decoded.
|
||||
// in this case, return a blank frame and don't overwrite previous data.
|
||||
if (frames.empty()) {
|
||||
return {};
|
||||
}
|
||||
|
||||
auto frame = std::move(frames.front());
|
||||
frames.pop();
|
||||
return frame;
|
||||
}
|
||||
|
||||
Host1x::NvdecCommon::VideoCodec Codec::GetCurrentCodec() const {
|
||||
return current_codec;
|
||||
}
|
||||
|
||||
std::string_view Codec::GetCurrentCodecName() const {
|
||||
switch (current_codec) {
|
||||
case Host1x::NvdecCommon::VideoCodec::None:
|
||||
return "None";
|
||||
case Host1x::NvdecCommon::VideoCodec::H264:
|
||||
return "H264";
|
||||
case Host1x::NvdecCommon::VideoCodec::VP8:
|
||||
return "VP8";
|
||||
case Host1x::NvdecCommon::VideoCodec::H265:
|
||||
return "H265";
|
||||
case Host1x::NvdecCommon::VideoCodec::VP9:
|
||||
return "VP9";
|
||||
default:
|
||||
return "Unknown";
|
||||
}
|
||||
}
|
||||
} // namespace Tegra
|
@ -1,63 +0,0 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <string_view>
|
||||
#include <queue>
|
||||
#include "common/common_types.h"
|
||||
#include "video_core/host1x/ffmpeg/ffmpeg.h"
|
||||
#include "video_core/host1x/nvdec_common.h"
|
||||
|
||||
namespace Tegra {
|
||||
|
||||
namespace Decoder {
|
||||
class H264;
|
||||
class VP8;
|
||||
class VP9;
|
||||
} // namespace Decoder
|
||||
|
||||
namespace Host1x {
|
||||
class Host1x;
|
||||
} // namespace Host1x
|
||||
|
||||
class Codec {
|
||||
public:
|
||||
explicit Codec(Host1x::Host1x& host1x, const Host1x::NvdecCommon::NvdecRegisters& regs);
|
||||
~Codec();
|
||||
|
||||
/// Initialize the codec, returning success or failure
|
||||
void Initialize();
|
||||
|
||||
/// Sets NVDEC video stream codec
|
||||
void SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec);
|
||||
|
||||
/// Call decoders to construct headers, decode AVFrame with ffmpeg
|
||||
void Decode();
|
||||
|
||||
/// Returns next decoded frame
|
||||
[[nodiscard]] std::unique_ptr<FFmpeg::Frame> GetCurrentFrame();
|
||||
|
||||
/// Returns the value of current_codec
|
||||
[[nodiscard]] Host1x::NvdecCommon::VideoCodec GetCurrentCodec() const;
|
||||
|
||||
/// Return name of the current codec
|
||||
[[nodiscard]] std::string_view GetCurrentCodecName() const;
|
||||
|
||||
private:
|
||||
bool initialized{};
|
||||
Host1x::NvdecCommon::VideoCodec current_codec{Host1x::NvdecCommon::VideoCodec::None};
|
||||
FFmpeg::DecodeApi decode_api;
|
||||
|
||||
Host1x::Host1x& host1x;
|
||||
const Host1x::NvdecCommon::NvdecRegisters& state;
|
||||
std::unique_ptr<Decoder::H264> h264_decoder;
|
||||
std::unique_ptr<Decoder::VP8> vp8_decoder;
|
||||
std::unique_ptr<Decoder::VP9> vp9_decoder;
|
||||
|
||||
std::queue<std::unique_ptr<FFmpeg::Frame>> frames{};
|
||||
};
|
||||
|
||||
} // namespace Tegra
|
69
src/video_core/host1x/codecs/decoder.cpp
Normal file
@ -0,0 +1,69 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/settings.h"
|
||||
#include "video_core/host1x/codecs/decoder.h"
|
||||
#include "video_core/host1x/host1x.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
|
||||
namespace Tegra {
|
||||
|
||||
Decoder::Decoder(Host1x::Host1x& host1x_, s32 id_, const Host1x::NvdecCommon::NvdecRegisters& regs_,
|
||||
Host1x::FrameQueue& frame_queue_)
|
||||
: host1x(host1x_), memory_manager{host1x.GMMU()}, regs{regs_}, id{id_}, frame_queue{
|
||||
frame_queue_} {}
|
||||
|
||||
Decoder::~Decoder() = default;
|
||||
|
||||
void Decoder::Decode() {
|
||||
if (!initialized) {
|
||||
return;
|
||||
}
|
||||
|
||||
const auto packet_data = ComposeFrame();
|
||||
// Send assembled bitstream to decoder.
|
||||
if (!decode_api.SendPacket(packet_data)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Only receive/store visible frames.
|
||||
if (vp9_hidden_frame) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Receive output frames from decoder.
|
||||
auto frame = decode_api.ReceiveFrame();
|
||||
|
||||
if (IsInterlaced()) {
|
||||
auto [luma_top, luma_bottom, chroma_top, chroma_bottom] = GetInterlacedOffsets();
|
||||
auto frame_copy = frame;
|
||||
|
||||
if (!frame.get()) {
|
||||
LOG_ERROR(HW_GPU, "Failed to decode interlaced frame for top 0x{:X} bottom 0x{:X}",
|
||||
luma_top, luma_bottom);
|
||||
}
|
||||
|
||||
if (UsingDecodeOrder()) {
|
||||
frame_queue.PushDecodeOrder(id, luma_top, std::move(frame));
|
||||
frame_queue.PushDecodeOrder(id, luma_bottom, std::move(frame_copy));
|
||||
} else {
|
||||
frame_queue.PushPresentOrder(id, luma_top, std::move(frame));
|
||||
frame_queue.PushPresentOrder(id, luma_bottom, std::move(frame_copy));
|
||||
}
|
||||
} else {
|
||||
auto [luma_offset, chroma_offset] = GetProgressiveOffsets();
|
||||
|
||||
if (!frame.get()) {
|
||||
LOG_ERROR(HW_GPU, "Failed to decode progressive frame for luma 0x{:X}", luma_offset);
|
||||
}
|
||||
|
||||
if (UsingDecodeOrder()) {
|
||||
frame_queue.PushDecodeOrder(id, luma_offset, std::move(frame));
|
||||
} else {
|
||||
frame_queue.PushPresentOrder(id, luma_offset, std::move(frame));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace Tegra
|
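For interlaced content the new Decoder pushes the same decoded picture twice, keyed by the top-field and bottom-field luma offsets, so VIC can later look up either field; copying the frame handle makes that cheap. A sketch of that dispatch, assuming the handle is a shared_ptr and FrameQueue's Push* methods take (stream id, luma offset, frame) as above:

// Sketch: enqueue one decoded frame under both field offsets (interlaced case).
void PushInterlaced(Tegra::Host1x::FrameQueue& queue, s32 id, u64 luma_top, u64 luma_bottom,
                    std::shared_ptr<FFmpeg::Frame> frame, bool decode_order) {
    auto bottom_field = frame; // both fields reference the same decoded picture
    if (decode_order) {
        queue.PushDecodeOrder(id, luma_top, std::move(frame));
        queue.PushDecodeOrder(id, luma_bottom, std::move(bottom_field));
    } else {
        queue.PushPresentOrder(id, luma_top, std::move(frame));
        queue.PushPresentOrder(id, luma_bottom, std::move(bottom_field));
    }
}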
64
src/video_core/host1x/codecs/decoder.h
Normal file
@ -0,0 +1,64 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <string_view>
|
||||
#include <unordered_map>
|
||||
#include <queue>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "video_core/host1x/ffmpeg/ffmpeg.h"
|
||||
#include "video_core/host1x/nvdec_common.h"
|
||||
|
||||
namespace Tegra {
|
||||
|
||||
namespace Host1x {
|
||||
class Host1x;
|
||||
class FrameQueue;
|
||||
} // namespace Host1x
|
||||
|
||||
class Decoder {
|
||||
public:
|
||||
virtual ~Decoder();
|
||||
|
||||
/// Call decoders to construct headers, decode AVFrame with ffmpeg
|
||||
void Decode();
|
||||
|
||||
bool UsingDecodeOrder() const {
|
||||
return decode_api.UsingDecodeOrder();
|
||||
}
|
||||
|
||||
/// Returns the value of current_codec
|
||||
[[nodiscard]] Host1x::NvdecCommon::VideoCodec GetCurrentCodec() const {
|
||||
return codec;
|
||||
}
|
||||
|
||||
/// Return name of the current codec
|
||||
[[nodiscard]] virtual std::string_view GetCurrentCodecName() const = 0;
|
||||
|
||||
protected:
|
||||
explicit Decoder(Host1x::Host1x& host1x, s32 id,
|
||||
const Host1x::NvdecCommon::NvdecRegisters& regs,
|
||||
Host1x::FrameQueue& frame_queue);
|
||||
|
||||
virtual std::span<const u8> ComposeFrame() = 0;
|
||||
virtual std::tuple<u64, u64> GetProgressiveOffsets() = 0;
|
||||
virtual std::tuple<u64, u64, u64, u64> GetInterlacedOffsets() = 0;
|
||||
virtual bool IsInterlaced() = 0;
|
||||
|
||||
Host1x::Host1x& host1x;
|
||||
Tegra::MemoryManager& memory_manager;
|
||||
const Host1x::NvdecCommon::NvdecRegisters& regs;
|
||||
s32 id;
|
||||
Host1x::FrameQueue& frame_queue;
|
||||
Host1x::NvdecCommon::VideoCodec codec;
|
||||
FFmpeg::DecodeApi decode_api;
|
||||
bool initialized{};
|
||||
bool vp9_hidden_frame{};
|
||||
};
|
||||
|
||||
} // namespace Tegra
|
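
This interface follows a template-method pattern: Decode() drives the common FFmpeg path while each codec only supplies bitstream composition and output-surface offsets. The do-nothing subclass below is purely illustrative (not part of the change) and shows the minimum a backend must provide; because it never calls decode_api.Initialize(), `initialized` stays false and Decode() returns immediately.

```cpp
#include <span>
#include <string_view>
#include <tuple>

#include "video_core/host1x/codecs/decoder.h"

// Illustrative stub only; the real backends (H264, VP8, VP9) follow later in this diff.
class NullDecoder final : public Tegra::Decoder {
public:
    explicit NullDecoder(Tegra::Host1x::Host1x& host1x, s32 id,
                         const Tegra::Host1x::NvdecCommon::NvdecRegisters& regs,
                         Tegra::Host1x::FrameQueue& frame_queue)
        : Decoder{host1x, id, regs, frame_queue} {}

    std::string_view GetCurrentCodecName() const override {
        return "None";
    }

protected:
    // An empty span means nothing is ever sent to FFmpeg.
    std::span<const u8> ComposeFrame() override {
        return {};
    }
    // {luma, chroma} output surface addresses for progressive content.
    std::tuple<u64, u64> GetProgressiveOffsets() override {
        return {0, 0};
    }
    // {luma_top, luma_bottom, chroma_top, chroma_bottom} for interlaced content.
    std::tuple<u64, u64, u64, u64> GetInterlacedOffsets() override {
        return {0, 0, 0, 0};
    }
    bool IsInterlaced() override {
        return false;
    }
};
```
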
@ -10,7 +10,7 @@
|
||||
#include "video_core/host1x/host1x.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
|
||||
namespace Tegra::Decoder {
|
||||
namespace Tegra::Decoders {
|
||||
namespace {
|
||||
// ZigZag LUTs from libavcodec.
|
||||
constexpr std::array<u8, 64> zig_zag_direct{
|
||||
@ -25,23 +25,56 @@ constexpr std::array<u8, 16> zig_zag_scan{
|
||||
};
|
||||
} // Anonymous namespace
|
||||
|
||||
H264::H264(Host1x::Host1x& host1x_) : host1x{host1x_} {}
|
||||
H264::H264(Host1x::Host1x& host1x_, const Host1x::NvdecCommon::NvdecRegisters& regs_, s32 id_,
|
||||
Host1x::FrameQueue& frame_queue_)
|
||||
: Decoder{host1x_, id_, regs_, frame_queue_} {
|
||||
codec = Host1x::NvdecCommon::VideoCodec::H264;
|
||||
initialized = decode_api.Initialize(codec);
|
||||
}
|
||||
|
||||
H264::~H264() = default;
|
||||
|
||||
std::span<const u8> H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state,
|
||||
size_t* out_configuration_size, bool is_first_frame) {
|
||||
H264DecoderContext context;
|
||||
host1x.GMMU().ReadBlock(state.picture_info_offset, &context, sizeof(H264DecoderContext));
|
||||
std::tuple<u64, u64> H264::GetProgressiveOffsets() {
|
||||
auto pic_idx{current_context.h264_parameter_set.curr_pic_idx};
|
||||
auto luma{regs.surface_luma_offsets[pic_idx].Address() +
|
||||
current_context.h264_parameter_set.luma_frame_offset.Address()};
|
||||
auto chroma{regs.surface_chroma_offsets[pic_idx].Address() +
|
||||
current_context.h264_parameter_set.chroma_frame_offset.Address()};
|
||||
return {luma, chroma};
|
||||
}
|
||||
|
||||
const s64 frame_number = context.h264_parameter_set.frame_number.Value();
|
||||
std::tuple<u64, u64, u64, u64> H264::GetInterlacedOffsets() {
|
||||
auto pic_idx{current_context.h264_parameter_set.curr_pic_idx};
|
||||
auto luma_top{regs.surface_luma_offsets[pic_idx].Address() +
|
||||
current_context.h264_parameter_set.luma_top_offset.Address()};
|
||||
auto luma_bottom{regs.surface_luma_offsets[pic_idx].Address() +
|
||||
current_context.h264_parameter_set.luma_bot_offset.Address()};
|
||||
auto chroma_top{regs.surface_chroma_offsets[pic_idx].Address() +
|
||||
current_context.h264_parameter_set.chroma_top_offset.Address()};
|
||||
auto chroma_bottom{regs.surface_chroma_offsets[pic_idx].Address() +
|
||||
current_context.h264_parameter_set.chroma_bot_offset.Address()};
|
||||
return {luma_top, luma_bottom, chroma_top, chroma_bottom};
|
||||
}
|
||||
|
||||
bool H264::IsInterlaced() {
|
||||
return current_context.h264_parameter_set.luma_top_offset.Address() != 0 ||
|
||||
current_context.h264_parameter_set.luma_bot_offset.Address() != 0;
|
||||
}
|
||||
|
||||
std::span<const u8> H264::ComposeFrame() {
|
||||
memory_manager.ReadBlock(regs.picture_info_offset.Address(), ¤t_context,
|
||||
sizeof(H264DecoderContext));
|
||||
|
||||
const s64 frame_number = current_context.h264_parameter_set.frame_number.Value();
|
||||
if (!is_first_frame && frame_number != 0) {
|
||||
frame.resize_destructive(context.stream_len);
|
||||
host1x.GMMU().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
|
||||
*out_configuration_size = 0;
|
||||
return frame;
|
||||
frame_scratch.resize_destructive(current_context.stream_len);
|
||||
memory_manager.ReadBlock(regs.frame_bitstream_offset.Address(), frame_scratch.data(),
|
||||
frame_scratch.size());
|
||||
return frame_scratch;
|
||||
}
|
||||
|
||||
is_first_frame = false;
|
||||
|
||||
// Encode header
|
||||
H264BitWriter writer{};
|
||||
writer.WriteU(1, 24);
|
||||
@ -53,7 +86,7 @@ std::span<const u8> H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters
|
||||
writer.WriteU(31, 8);
|
||||
writer.WriteUe(0);
|
||||
const u32 chroma_format_idc =
|
||||
static_cast<u32>(context.h264_parameter_set.chroma_format_idc.Value());
|
||||
static_cast<u32>(current_context.h264_parameter_set.chroma_format_idc.Value());
|
||||
writer.WriteUe(chroma_format_idc);
|
||||
if (chroma_format_idc == 3) {
|
||||
writer.WriteBit(false);
|
||||
@ -61,42 +94,44 @@ std::span<const u8> H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters
|
||||
|
||||
writer.WriteUe(0);
|
||||
writer.WriteUe(0);
|
||||
writer.WriteBit(false); // QpprimeYZeroTransformBypassFlag
|
||||
writer.WriteBit(current_context.qpprime_y_zero_transform_bypass_flag.Value() != 0);
|
||||
writer.WriteBit(false); // Scaling matrix present flag
|
||||
|
||||
writer.WriteUe(static_cast<u32>(context.h264_parameter_set.log2_max_frame_num_minus4.Value()));
|
||||
writer.WriteUe(
|
||||
static_cast<u32>(current_context.h264_parameter_set.log2_max_frame_num_minus4.Value()));
|
||||
|
||||
const auto order_cnt_type =
|
||||
static_cast<u32>(context.h264_parameter_set.pic_order_cnt_type.Value());
|
||||
static_cast<u32>(current_context.h264_parameter_set.pic_order_cnt_type.Value());
|
||||
writer.WriteUe(order_cnt_type);
|
||||
if (order_cnt_type == 0) {
|
||||
writer.WriteUe(context.h264_parameter_set.log2_max_pic_order_cnt_lsb_minus4);
|
||||
writer.WriteUe(current_context.h264_parameter_set.log2_max_pic_order_cnt_lsb_minus4);
|
||||
} else if (order_cnt_type == 1) {
|
||||
writer.WriteBit(context.h264_parameter_set.delta_pic_order_always_zero_flag != 0);
|
||||
writer.WriteBit(current_context.h264_parameter_set.delta_pic_order_always_zero_flag != 0);
|
||||
|
||||
writer.WriteSe(0);
|
||||
writer.WriteSe(0);
|
||||
writer.WriteUe(0);
|
||||
}
|
||||
|
||||
const s32 pic_height = context.h264_parameter_set.frame_height_in_map_units /
|
||||
(context.h264_parameter_set.frame_mbs_only_flag ? 1 : 2);
|
||||
const s32 pic_height = current_context.h264_parameter_set.frame_height_in_mbs /
|
||||
(current_context.h264_parameter_set.frame_mbs_only_flag ? 1 : 2);
|
||||
|
||||
// TODO (ameerj): Where do we get this number, it seems to be particular for each stream
|
||||
const auto nvdec_decoding = Settings::values.nvdec_emulation.GetValue();
|
||||
const bool uses_gpu_decoding = nvdec_decoding == Settings::NvdecEmulation::Gpu;
|
||||
const u32 max_num_ref_frames = uses_gpu_decoding ? 6u : 16u;
|
||||
u32 max_num_ref_frames =
|
||||
std::max(std::max(current_context.h264_parameter_set.num_refidx_l0_default_active,
|
||||
current_context.h264_parameter_set.num_refidx_l1_default_active) +
|
||||
1,
|
||||
4);
|
||||
writer.WriteUe(max_num_ref_frames);
|
||||
writer.WriteBit(false);
|
||||
writer.WriteUe(context.h264_parameter_set.pic_width_in_mbs - 1);
|
||||
writer.WriteUe(current_context.h264_parameter_set.pic_width_in_mbs - 1);
|
||||
writer.WriteUe(pic_height - 1);
|
||||
writer.WriteBit(context.h264_parameter_set.frame_mbs_only_flag != 0);
|
||||
writer.WriteBit(current_context.h264_parameter_set.frame_mbs_only_flag != 0);
|
||||
|
||||
if (!context.h264_parameter_set.frame_mbs_only_flag) {
|
||||
writer.WriteBit(context.h264_parameter_set.flags.mbaff_frame.Value() != 0);
|
||||
if (!current_context.h264_parameter_set.frame_mbs_only_flag) {
|
||||
writer.WriteBit(current_context.h264_parameter_set.flags.mbaff_frame.Value() != 0);
|
||||
}
|
||||
|
||||
writer.WriteBit(context.h264_parameter_set.flags.direct_8x8_inference.Value() != 0);
|
||||
writer.WriteBit(current_context.h264_parameter_set.flags.direct_8x8_inference.Value() != 0);
|
||||
writer.WriteBit(false); // Frame cropping flag
|
||||
writer.WriteBit(false); // VUI parameter present flag
|
||||
|
||||
@ -111,57 +146,59 @@ std::span<const u8> H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters
|
||||
writer.WriteUe(0);
|
||||
writer.WriteUe(0);
|
||||
|
||||
writer.WriteBit(context.h264_parameter_set.entropy_coding_mode_flag != 0);
|
||||
writer.WriteBit(context.h264_parameter_set.pic_order_present_flag != 0);
|
||||
writer.WriteBit(current_context.h264_parameter_set.entropy_coding_mode_flag != 0);
|
||||
writer.WriteBit(current_context.h264_parameter_set.pic_order_present_flag != 0);
|
||||
writer.WriteUe(0);
|
||||
writer.WriteUe(context.h264_parameter_set.num_refidx_l0_default_active);
|
||||
writer.WriteUe(context.h264_parameter_set.num_refidx_l1_default_active);
|
||||
writer.WriteBit(context.h264_parameter_set.flags.weighted_pred.Value() != 0);
|
||||
writer.WriteU(static_cast<s32>(context.h264_parameter_set.weighted_bipred_idc.Value()), 2);
|
||||
s32 pic_init_qp = static_cast<s32>(context.h264_parameter_set.pic_init_qp_minus26.Value());
|
||||
writer.WriteUe(current_context.h264_parameter_set.num_refidx_l0_default_active);
|
||||
writer.WriteUe(current_context.h264_parameter_set.num_refidx_l1_default_active);
|
||||
writer.WriteBit(current_context.h264_parameter_set.flags.weighted_pred.Value() != 0);
|
||||
writer.WriteU(static_cast<s32>(current_context.h264_parameter_set.weighted_bipred_idc.Value()),
|
||||
2);
|
||||
s32 pic_init_qp =
|
||||
static_cast<s32>(current_context.h264_parameter_set.pic_init_qp_minus26.Value());
|
||||
writer.WriteSe(pic_init_qp);
|
||||
writer.WriteSe(0);
|
||||
s32 chroma_qp_index_offset =
|
||||
static_cast<s32>(context.h264_parameter_set.chroma_qp_index_offset.Value());
|
||||
static_cast<s32>(current_context.h264_parameter_set.chroma_qp_index_offset.Value());
|
||||
|
||||
writer.WriteSe(chroma_qp_index_offset);
|
||||
writer.WriteBit(context.h264_parameter_set.deblocking_filter_control_present_flag != 0);
|
||||
writer.WriteBit(context.h264_parameter_set.flags.constrained_intra_pred.Value() != 0);
|
||||
writer.WriteBit(context.h264_parameter_set.redundant_pic_cnt_present_flag != 0);
|
||||
writer.WriteBit(context.h264_parameter_set.transform_8x8_mode_flag != 0);
|
||||
writer.WriteBit(current_context.h264_parameter_set.deblocking_filter_control_present_flag != 0);
|
||||
writer.WriteBit(current_context.h264_parameter_set.flags.constrained_intra_pred.Value() != 0);
|
||||
writer.WriteBit(current_context.h264_parameter_set.redundant_pic_cnt_present_flag != 0);
|
||||
writer.WriteBit(current_context.h264_parameter_set.transform_8x8_mode_flag != 0);
|
||||
|
||||
writer.WriteBit(true); // pic_scaling_matrix_present_flag
|
||||
|
||||
for (s32 index = 0; index < 6; index++) {
|
||||
writer.WriteBit(true);
|
||||
std::span<const u8> matrix{context.weight_scale};
|
||||
writer.WriteScalingList(scan, matrix, index * 16, 16);
|
||||
std::span<const u8> matrix{current_context.weight_scale_4x4};
|
||||
writer.WriteScalingList(scan_scratch, matrix, index * 16, 16);
|
||||
}
|
||||
|
||||
if (context.h264_parameter_set.transform_8x8_mode_flag) {
|
||||
if (current_context.h264_parameter_set.transform_8x8_mode_flag) {
|
||||
for (s32 index = 0; index < 2; index++) {
|
||||
writer.WriteBit(true);
|
||||
std::span<const u8> matrix{context.weight_scale_8x8};
|
||||
writer.WriteScalingList(scan, matrix, index * 64, 64);
|
||||
std::span<const u8> matrix{current_context.weight_scale_8x8};
|
||||
writer.WriteScalingList(scan_scratch, matrix, index * 64, 64);
|
||||
}
|
||||
}
|
||||
|
||||
s32 chroma_qp_index_offset2 =
|
||||
static_cast<s32>(context.h264_parameter_set.second_chroma_qp_index_offset.Value());
|
||||
static_cast<s32>(current_context.h264_parameter_set.second_chroma_qp_index_offset.Value());
|
||||
|
||||
writer.WriteSe(chroma_qp_index_offset2);
|
||||
|
||||
writer.End();
|
||||
|
||||
const auto& encoded_header = writer.GetByteArray();
|
||||
frame.resize(encoded_header.size() + context.stream_len);
|
||||
std::memcpy(frame.data(), encoded_header.data(), encoded_header.size());
|
||||
frame_scratch.resize(encoded_header.size() + current_context.stream_len);
|
||||
std::memcpy(frame_scratch.data(), encoded_header.data(), encoded_header.size());
|
||||
|
||||
*out_configuration_size = encoded_header.size();
|
||||
host1x.GMMU().ReadBlock(state.frame_bitstream_offset, frame.data() + encoded_header.size(),
|
||||
context.stream_len);
|
||||
memory_manager.ReadBlock(regs.frame_bitstream_offset.Address(),
|
||||
frame_scratch.data() + encoded_header.size(),
|
||||
current_context.stream_len);
|
||||
|
||||
return frame;
|
||||
return frame_scratch;
|
||||
}
|
||||
|
||||
H264BitWriter::H264BitWriter() = default;
|
||||
@ -278,4 +315,4 @@ void H264BitWriter::Flush() {
|
||||
buffer = 0;
|
||||
buffer_pos = 0;
|
||||
}
|
||||
} // namespace Tegra::Decoder
|
||||
} // namespace Tegra::Decoders
|
||||
|
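
The SPS/PPS writer above emits most header fields as Exp-Golomb codes (the WriteUe/WriteSe calls). For reference, this is the standard ue(v)/se(v) coding defined by the H.264 spec; the sketch below is generic and does not reuse yuzu's H264BitWriter, and the BitSink type is a toy stand-in.

```cpp
#include <cstdint>
#include <string>

// Toy bit sink: records bits as '0'/'1' characters so the output is easy to inspect.
struct BitSink {
    std::string bits;
    void WriteBits(std::uint32_t value, int count) {
        for (int i = count - 1; i >= 0; --i) {
            bits.push_back(((value >> i) & 1) ? '1' : '0');
        }
    }
};

// ue(v): encode value+1 as (N-1) leading zero bits followed by its N-bit binary form.
void WriteUe(BitSink& sink, std::uint32_t value) {
    const std::uint64_t coded = static_cast<std::uint64_t>(value) + 1;
    int bits = 0;
    while ((coded >> bits) != 0) {
        ++bits;
    }
    sink.WriteBits(0, bits - 1);
    sink.WriteBits(static_cast<std::uint32_t>(coded), bits);
}

// se(v): zig-zag map signed values onto ue(v): 0, 1, -1, 2, -2, ...
void WriteSe(BitSink& sink, std::int32_t value) {
    const std::uint32_t mapped =
        value > 0 ? 2u * static_cast<std::uint32_t>(value) - 1u
                  : 2u * static_cast<std::uint32_t>(-static_cast<std::int64_t>(value));
    WriteUe(sink, mapped);
}

// Example: WriteUe(sink, 2) appends "011"; WriteSe(sink, -1) maps -1 to 2 and also appends "011".
```
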
@ -10,6 +10,7 @@
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/scratch_buffer.h"
|
||||
#include "video_core/host1x/codecs/decoder.h"
|
||||
#include "video_core/host1x/nvdec_common.h"
|
||||
|
||||
namespace Tegra {
|
||||
@ -18,7 +19,7 @@ namespace Host1x {
|
||||
class Host1x;
|
||||
} // namespace Host1x
|
||||
|
||||
namespace Decoder {
|
||||
namespace Decoders {
|
||||
|
||||
class H264BitWriter {
|
||||
public:
|
||||
@ -60,123 +61,213 @@ private:
|
||||
std::vector<u8> byte_array;
|
||||
};
|
||||
|
||||
class H264 {
|
||||
public:
|
||||
explicit H264(Host1x::Host1x& host1x);
|
||||
~H264();
|
||||
|
||||
/// Compose the H264 frame for FFmpeg decoding
|
||||
[[nodiscard]] std::span<const u8> ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state,
|
||||
size_t* out_configuration_size,
|
||||
bool is_first_frame = false);
|
||||
struct Offset {
|
||||
constexpr u32 Address() const noexcept {
|
||||
return offset << 8;
|
||||
}
|
||||
|
||||
private:
|
||||
Common::ScratchBuffer<u8> frame;
|
||||
Common::ScratchBuffer<u8> scan;
|
||||
Host1x::Host1x& host1x;
|
||||
u32 offset;
|
||||
};
|
||||
static_assert(std::is_trivial_v<Offset>, "Offset must be trivial");
|
||||
static_assert(sizeof(Offset) == 0x4, "Offset has the wrong size!");
|
||||
|
||||
struct H264ParameterSet {
|
||||
s32 log2_max_pic_order_cnt_lsb_minus4; ///< 0x00
|
||||
s32 delta_pic_order_always_zero_flag; ///< 0x04
|
||||
s32 frame_mbs_only_flag; ///< 0x08
|
||||
u32 pic_width_in_mbs; ///< 0x0C
|
||||
u32 frame_height_in_map_units; ///< 0x10
|
||||
union { ///< 0x14
|
||||
BitField<0, 2, u32> tile_format;
|
||||
BitField<2, 3, u32> gob_height;
|
||||
};
|
||||
u32 entropy_coding_mode_flag; ///< 0x18
|
||||
s32 pic_order_present_flag; ///< 0x1C
|
||||
s32 num_refidx_l0_default_active; ///< 0x20
|
||||
s32 num_refidx_l1_default_active; ///< 0x24
|
||||
s32 deblocking_filter_control_present_flag; ///< 0x28
|
||||
s32 redundant_pic_cnt_present_flag; ///< 0x2C
|
||||
u32 transform_8x8_mode_flag; ///< 0x30
|
||||
u32 pitch_luma; ///< 0x34
|
||||
u32 pitch_chroma; ///< 0x38
|
||||
u32 luma_top_offset; ///< 0x3C
|
||||
u32 luma_bot_offset; ///< 0x40
|
||||
u32 luma_frame_offset; ///< 0x44
|
||||
u32 chroma_top_offset; ///< 0x48
|
||||
u32 chroma_bot_offset; ///< 0x4C
|
||||
u32 chroma_frame_offset; ///< 0x50
|
||||
u32 hist_buffer_size; ///< 0x54
|
||||
union { ///< 0x58
|
||||
union {
|
||||
BitField<0, 1, u64> mbaff_frame;
|
||||
BitField<1, 1, u64> direct_8x8_inference;
|
||||
BitField<2, 1, u64> weighted_pred;
|
||||
BitField<3, 1, u64> constrained_intra_pred;
|
||||
BitField<4, 1, u64> ref_pic;
|
||||
BitField<5, 1, u64> field_pic;
|
||||
BitField<6, 1, u64> bottom_field;
|
||||
BitField<7, 1, u64> second_field;
|
||||
} flags;
|
||||
BitField<8, 4, u64> log2_max_frame_num_minus4;
|
||||
BitField<12, 2, u64> chroma_format_idc;
|
||||
BitField<14, 2, u64> pic_order_cnt_type;
|
||||
BitField<16, 6, s64> pic_init_qp_minus26;
|
||||
BitField<22, 5, s64> chroma_qp_index_offset;
|
||||
BitField<27, 5, s64> second_chroma_qp_index_offset;
|
||||
BitField<32, 2, u64> weighted_bipred_idc;
|
||||
BitField<34, 7, u64> curr_pic_idx;
|
||||
BitField<41, 5, u64> curr_col_idx;
|
||||
BitField<46, 16, u64> frame_number;
|
||||
BitField<62, 1, u64> frame_surfaces;
|
||||
BitField<63, 1, u64> output_memory_layout;
|
||||
};
|
||||
struct H264ParameterSet {
|
||||
s32 log2_max_pic_order_cnt_lsb_minus4; ///< 0x00
|
||||
s32 delta_pic_order_always_zero_flag; ///< 0x04
|
||||
s32 frame_mbs_only_flag; ///< 0x08
|
||||
u32 pic_width_in_mbs; ///< 0x0C
|
||||
u32 frame_height_in_mbs; ///< 0x10
|
||||
union { ///< 0x14
|
||||
BitField<0, 2, u32> tile_format;
|
||||
BitField<2, 3, u32> gob_height;
|
||||
BitField<5, 27, u32> reserved_surface_format;
|
||||
};
|
||||
static_assert(sizeof(H264ParameterSet) == 0x60, "H264ParameterSet is an invalid size");
|
||||
|
||||
struct H264DecoderContext {
|
||||
INSERT_PADDING_WORDS_NOINIT(18); ///< 0x0000
|
||||
u32 stream_len; ///< 0x0048
|
||||
INSERT_PADDING_WORDS_NOINIT(3); ///< 0x004C
|
||||
H264ParameterSet h264_parameter_set; ///< 0x0058
|
||||
INSERT_PADDING_WORDS_NOINIT(66); ///< 0x00B8
|
||||
std::array<u8, 0x60> weight_scale; ///< 0x01C0
|
||||
std::array<u8, 0x80> weight_scale_8x8; ///< 0x0220
|
||||
u32 entropy_coding_mode_flag; ///< 0x18
|
||||
s32 pic_order_present_flag; ///< 0x1C
|
||||
s32 num_refidx_l0_default_active; ///< 0x20
|
||||
s32 num_refidx_l1_default_active; ///< 0x24
|
||||
s32 deblocking_filter_control_present_flag; ///< 0x28
|
||||
s32 redundant_pic_cnt_present_flag; ///< 0x2C
|
||||
u32 transform_8x8_mode_flag; ///< 0x30
|
||||
u32 pitch_luma; ///< 0x34
|
||||
u32 pitch_chroma; ///< 0x38
|
||||
Offset luma_top_offset; ///< 0x3C
|
||||
Offset luma_bot_offset; ///< 0x40
|
||||
Offset luma_frame_offset; ///< 0x44
|
||||
Offset chroma_top_offset; ///< 0x48
|
||||
Offset chroma_bot_offset; ///< 0x4C
|
||||
Offset chroma_frame_offset; ///< 0x50
|
||||
u32 hist_buffer_size; ///< 0x54
|
||||
union { ///< 0x58
|
||||
union {
|
||||
BitField<0, 1, u64> mbaff_frame;
|
||||
BitField<1, 1, u64> direct_8x8_inference;
|
||||
BitField<2, 1, u64> weighted_pred;
|
||||
BitField<3, 1, u64> constrained_intra_pred;
|
||||
BitField<4, 1, u64> ref_pic;
|
||||
BitField<5, 1, u64> field_pic;
|
||||
BitField<6, 1, u64> bottom_field;
|
||||
BitField<7, 1, u64> second_field;
|
||||
} flags;
|
||||
BitField<8, 4, u64> log2_max_frame_num_minus4;
|
||||
BitField<12, 2, u64> chroma_format_idc;
|
||||
BitField<14, 2, u64> pic_order_cnt_type;
|
||||
BitField<16, 6, s64> pic_init_qp_minus26;
|
||||
BitField<22, 5, s64> chroma_qp_index_offset;
|
||||
BitField<27, 5, s64> second_chroma_qp_index_offset;
|
||||
BitField<32, 2, u64> weighted_bipred_idc;
|
||||
BitField<34, 7, u64> curr_pic_idx;
|
||||
BitField<41, 5, u64> curr_col_idx;
|
||||
BitField<46, 16, u64> frame_number;
|
||||
BitField<62, 1, u64> frame_surfaces;
|
||||
BitField<63, 1, u64> output_memory_layout;
|
||||
};
|
||||
static_assert(sizeof(H264DecoderContext) == 0x2A0, "H264DecoderContext is an invalid size");
|
||||
};
|
||||
static_assert(sizeof(H264ParameterSet) == 0x60, "H264ParameterSet is an invalid size");
|
||||
|
||||
#define ASSERT_POSITION(field_name, position) \
|
||||
static_assert(offsetof(H264ParameterSet, field_name) == position, \
|
||||
"Field " #field_name " has invalid position")
|
||||
|
||||
ASSERT_POSITION(log2_max_pic_order_cnt_lsb_minus4, 0x00);
|
||||
ASSERT_POSITION(delta_pic_order_always_zero_flag, 0x04);
|
||||
ASSERT_POSITION(frame_mbs_only_flag, 0x08);
|
||||
ASSERT_POSITION(pic_width_in_mbs, 0x0C);
|
||||
ASSERT_POSITION(frame_height_in_map_units, 0x10);
|
||||
ASSERT_POSITION(tile_format, 0x14);
|
||||
ASSERT_POSITION(entropy_coding_mode_flag, 0x18);
|
||||
ASSERT_POSITION(pic_order_present_flag, 0x1C);
|
||||
ASSERT_POSITION(num_refidx_l0_default_active, 0x20);
|
||||
ASSERT_POSITION(num_refidx_l1_default_active, 0x24);
|
||||
ASSERT_POSITION(deblocking_filter_control_present_flag, 0x28);
|
||||
ASSERT_POSITION(redundant_pic_cnt_present_flag, 0x2C);
|
||||
ASSERT_POSITION(transform_8x8_mode_flag, 0x30);
|
||||
ASSERT_POSITION(pitch_luma, 0x34);
|
||||
ASSERT_POSITION(pitch_chroma, 0x38);
|
||||
ASSERT_POSITION(luma_top_offset, 0x3C);
|
||||
ASSERT_POSITION(luma_bot_offset, 0x40);
|
||||
ASSERT_POSITION(luma_frame_offset, 0x44);
|
||||
ASSERT_POSITION(chroma_top_offset, 0x48);
|
||||
ASSERT_POSITION(chroma_bot_offset, 0x4C);
|
||||
ASSERT_POSITION(chroma_frame_offset, 0x50);
|
||||
ASSERT_POSITION(hist_buffer_size, 0x54);
|
||||
ASSERT_POSITION(flags, 0x58);
|
||||
ASSERT_POSITION(log2_max_pic_order_cnt_lsb_minus4, 0x00);
|
||||
ASSERT_POSITION(delta_pic_order_always_zero_flag, 0x04);
|
||||
ASSERT_POSITION(frame_mbs_only_flag, 0x08);
|
||||
ASSERT_POSITION(pic_width_in_mbs, 0x0C);
|
||||
ASSERT_POSITION(frame_height_in_mbs, 0x10);
|
||||
ASSERT_POSITION(tile_format, 0x14);
|
||||
ASSERT_POSITION(entropy_coding_mode_flag, 0x18);
|
||||
ASSERT_POSITION(pic_order_present_flag, 0x1C);
|
||||
ASSERT_POSITION(num_refidx_l0_default_active, 0x20);
|
||||
ASSERT_POSITION(num_refidx_l1_default_active, 0x24);
|
||||
ASSERT_POSITION(deblocking_filter_control_present_flag, 0x28);
|
||||
ASSERT_POSITION(redundant_pic_cnt_present_flag, 0x2C);
|
||||
ASSERT_POSITION(transform_8x8_mode_flag, 0x30);
|
||||
ASSERT_POSITION(pitch_luma, 0x34);
|
||||
ASSERT_POSITION(pitch_chroma, 0x38);
|
||||
ASSERT_POSITION(luma_top_offset, 0x3C);
|
||||
ASSERT_POSITION(luma_bot_offset, 0x40);
|
||||
ASSERT_POSITION(luma_frame_offset, 0x44);
|
||||
ASSERT_POSITION(chroma_top_offset, 0x48);
|
||||
ASSERT_POSITION(chroma_bot_offset, 0x4C);
|
||||
ASSERT_POSITION(chroma_frame_offset, 0x50);
|
||||
ASSERT_POSITION(hist_buffer_size, 0x54);
|
||||
ASSERT_POSITION(flags, 0x58);
|
||||
#undef ASSERT_POSITION
|
||||
|
||||
struct DpbEntry {
|
||||
union {
|
||||
BitField<0, 7, u32> index;
|
||||
BitField<7, 5, u32> col_idx;
|
||||
BitField<12, 2, u32> state;
|
||||
BitField<14, 1, u32> is_long_term;
|
||||
BitField<15, 1, u32> non_existing;
|
||||
BitField<16, 1, u32> is_field;
|
||||
BitField<17, 4, u32> top_field_marking;
|
||||
BitField<21, 4, u32> bottom_field_marking;
|
||||
BitField<25, 1, u32> output_memory_layout;
|
||||
BitField<26, 6, u32> reserved;
|
||||
} flags;
|
||||
std::array<u32, 2> field_order_cnt;
|
||||
u32 frame_idx;
|
||||
};
|
||||
static_assert(sizeof(DpbEntry) == 0x10, "DpbEntry has the wrong size!");
|
||||
|
||||
struct DisplayParam {
|
||||
union {
|
||||
BitField<0, 1, u32> enable_tf_output;
|
||||
BitField<1, 1, u32> vc1_map_y_flag;
|
||||
BitField<2, 3, u32> map_y_value;
|
||||
BitField<5, 1, u32> vc1_map_uv_flag;
|
||||
BitField<6, 3, u32> map_uv_value;
|
||||
BitField<9, 8, u32> out_stride;
|
||||
BitField<17, 3, u32> tiling_format;
|
||||
BitField<20, 1, u32> output_structure; // 0=frame, 1=field
|
||||
BitField<21, 11, u32> reserved0;
|
||||
};
|
||||
std::array<s32, 2> output_top;
|
||||
std::array<s32, 2> output_bottom;
|
||||
union {
|
||||
BitField<0, 1, u32> enable_histogram;
|
||||
BitField<1, 12, u32> histogram_start_x;
|
||||
BitField<13, 12, u32> histogram_start_y;
|
||||
BitField<25, 7, u32> reserved1;
|
||||
};
|
||||
union {
|
||||
BitField<0, 12, u32> histogram_end_x;
|
||||
BitField<12, 12, u32> histogram_end_y;
|
||||
BitField<24, 8, u32> reserved2;
|
||||
};
|
||||
};
|
||||
static_assert(sizeof(DisplayParam) == 0x1C, "DisplayParam has the wrong size!");
|
||||
|
||||
struct H264DecoderContext {
|
||||
INSERT_PADDING_WORDS_NOINIT(13); ///< 0x0000
|
||||
std::array<u8, 16> eos; ///< 0x0034
|
||||
u8 explicit_eos_present_flag; ///< 0x0044
|
||||
u8 hint_dump_en; ///< 0x0045
|
||||
INSERT_PADDING_BYTES_NOINIT(2); ///< 0x0046
|
||||
u32 stream_len; ///< 0x0048
|
||||
u32 slice_count; ///< 0x004C
|
||||
u32 mbhist_buffer_size; ///< 0x0050
|
||||
u32 gptimer_timeout_value; ///< 0x0054
|
||||
H264ParameterSet h264_parameter_set; ///< 0x0058
|
||||
std::array<s32, 2> curr_field_order_cnt; ///< 0x00B8
|
||||
std::array<DpbEntry, 16> dpb; ///< 0x00C0
|
||||
std::array<u8, 0x60> weight_scale_4x4; ///< 0x01C0
|
||||
std::array<u8, 0x80> weight_scale_8x8; ///< 0x0220
|
||||
std::array<u8, 2> num_inter_view_refs_lX; ///< 0x02A0
|
||||
std::array<u8, 14> reserved2; ///< 0x02A2
|
||||
std::array<std::array<s8, 16>, 2> inter_view_refidx_lX; ///< 0x02B0
|
||||
union { ///< 0x02D0
|
||||
BitField<0, 1, u32> lossless_ipred8x8_filter_enable;
|
||||
BitField<1, 1, u32> qpprime_y_zero_transform_bypass_flag;
|
||||
BitField<2, 30, u32> reserved3;
|
||||
};
|
||||
DisplayParam display_param; ///< 0x02D4
|
||||
std::array<u32, 3> reserved4; ///< 0x02F0
|
||||
};
|
||||
static_assert(sizeof(H264DecoderContext) == 0x2FC, "H264DecoderContext is an invalid size");
|
||||
|
||||
#define ASSERT_POSITION(field_name, position) \
|
||||
static_assert(offsetof(H264DecoderContext, field_name) == position, \
|
||||
"Field " #field_name " has invalid position")
|
||||
|
||||
ASSERT_POSITION(stream_len, 0x48);
|
||||
ASSERT_POSITION(h264_parameter_set, 0x58);
|
||||
ASSERT_POSITION(weight_scale, 0x1C0);
|
||||
ASSERT_POSITION(stream_len, 0x48);
|
||||
ASSERT_POSITION(h264_parameter_set, 0x58);
|
||||
ASSERT_POSITION(dpb, 0xC0);
|
||||
ASSERT_POSITION(weight_scale_4x4, 0x1C0);
|
||||
#undef ASSERT_POSITION
|
||||
|
||||
class H264 final : public Decoder {
|
||||
public:
|
||||
explicit H264(Host1x::Host1x& host1x, const Host1x::NvdecCommon::NvdecRegisters& regs, s32 id,
|
||||
Host1x::FrameQueue& frame_queue);
|
||||
~H264() override;
|
||||
|
||||
H264(const H264&) = delete;
|
||||
H264& operator=(const H264&) = delete;
|
||||
|
||||
H264(H264&&) = delete;
|
||||
H264& operator=(H264&&) = delete;
|
||||
|
||||
/// Compose the H264 frame for FFmpeg decoding
|
||||
[[nodiscard]] std::span<const u8> ComposeFrame() override;
|
||||
|
||||
std::tuple<u64, u64> GetProgressiveOffsets() override;
|
||||
std::tuple<u64, u64, u64, u64> GetInterlacedOffsets() override;
|
||||
bool IsInterlaced() override;
|
||||
|
||||
std::string_view GetCurrentCodecName() const override {
|
||||
return "H264";
|
||||
}
|
||||
|
||||
private:
|
||||
bool is_first_frame{true};
|
||||
Common::ScratchBuffer<u8> frame_scratch;
|
||||
Common::ScratchBuffer<u8> scan_scratch;
|
||||
H264DecoderContext current_context{};
|
||||
};
|
||||
|
||||
} // namespace Decoder
|
||||
} // namespace Decoders
|
||||
} // namespace Tegra
|
||||
|
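
A note on the Offset helper introduced above: the NVDEC context stores surface addresses in 256-byte units, so Address() shifts the raw value left by 8 before the decoder adds the per-frame luma/chroma offsets. A tiny worked example (the values are arbitrary, not taken from real register state):

```cpp
#include <cstdint>

// Mirrors the Offset::Address() convention above: raw register value * 256 bytes.
constexpr std::uint64_t Address(std::uint32_t offset_in_256_byte_units) {
    return static_cast<std::uint64_t>(offset_in_256_byte_units) << 8;
}

// e.g. a surface base register of 0x1000 units plus a luma_frame_offset of 0x10 units:
constexpr std::uint64_t surface_luma_base = Address(0x1000); // 0x100000 bytes
constexpr std::uint64_t luma_frame_offset = Address(0x10);   // 0x1000 bytes
constexpr std::uint64_t luma_address = surface_luma_base + luma_frame_offset;

static_assert(luma_address == 0x101000, "progressive luma address = surface base + frame offset");
```
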
@ -7,47 +7,70 @@
|
||||
#include "video_core/host1x/host1x.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
|
||||
namespace Tegra::Decoder {
|
||||
VP8::VP8(Host1x::Host1x& host1x_) : host1x{host1x_} {}
|
||||
namespace Tegra::Decoders {
|
||||
VP8::VP8(Host1x::Host1x& host1x_, const Host1x::NvdecCommon::NvdecRegisters& regs_, s32 id_,
|
||||
Host1x::FrameQueue& frame_queue_)
|
||||
: Decoder{host1x_, id_, regs_, frame_queue_} {
|
||||
codec = Host1x::NvdecCommon::VideoCodec::VP8;
|
||||
initialized = decode_api.Initialize(codec);
|
||||
}
|
||||
|
||||
VP8::~VP8() = default;
|
||||
|
||||
std::span<const u8> VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
|
||||
VP8PictureInfo info;
|
||||
host1x.GMMU().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));
|
||||
std::tuple<u64, u64> VP8::GetProgressiveOffsets() {
|
||||
auto luma{regs.surface_luma_offsets[static_cast<u32>(Vp8SurfaceIndex::Current)].Address()};
|
||||
auto chroma{regs.surface_chroma_offsets[static_cast<u32>(Vp8SurfaceIndex::Current)].Address()};
|
||||
return {luma, chroma};
|
||||
}
|
||||
|
||||
const bool is_key_frame = info.key_frame == 1u;
|
||||
const auto bitstream_size = static_cast<size_t>(info.vld_buffer_size);
|
||||
std::tuple<u64, u64, u64, u64> VP8::GetInterlacedOffsets() {
|
||||
auto luma_top{regs.surface_luma_offsets[static_cast<u32>(Vp8SurfaceIndex::Current)].Address()};
|
||||
auto luma_bottom{
|
||||
regs.surface_luma_offsets[static_cast<u32>(Vp8SurfaceIndex::Current)].Address()};
|
||||
auto chroma_top{
|
||||
regs.surface_chroma_offsets[static_cast<u32>(Vp8SurfaceIndex::Current)].Address()};
|
||||
auto chroma_bottom{
|
||||
regs.surface_chroma_offsets[static_cast<u32>(Vp8SurfaceIndex::Current)].Address()};
|
||||
return {luma_top, luma_bottom, chroma_top, chroma_bottom};
|
||||
}
|
||||
|
||||
std::span<const u8> VP8::ComposeFrame() {
|
||||
memory_manager.ReadBlock(regs.picture_info_offset.Address(), ¤t_context,
|
||||
sizeof(VP8PictureInfo));
|
||||
|
||||
const bool is_key_frame = current_context.key_frame == 1u;
|
||||
const auto bitstream_size = static_cast<size_t>(current_context.vld_buffer_size);
|
||||
const size_t header_size = is_key_frame ? 10u : 3u;
|
||||
frame.resize(header_size + bitstream_size);
|
||||
frame_scratch.resize(header_size + bitstream_size);
|
||||
|
||||
// Based on page 30 of the VP8 specification.
|
||||
// https://datatracker.ietf.org/doc/rfc6386/
|
||||
frame[0] = is_key_frame ? 0u : 1u; // 1-bit frame type (0: keyframe, 1: interframes).
|
||||
frame[0] |= static_cast<u8>((info.version & 7u) << 1u); // 3-bit version number
|
||||
frame[0] |= static_cast<u8>(1u << 4u); // 1-bit show_frame flag
|
||||
frame_scratch[0] = is_key_frame ? 0u : 1u; // 1-bit frame type (0: keyframe, 1: interframes).
|
||||
frame_scratch[0] |=
|
||||
static_cast<u8>((current_context.version & 7u) << 1u); // 3-bit version number
|
||||
frame_scratch[0] |= static_cast<u8>(1u << 4u); // 1-bit show_frame flag
|
||||
|
||||
// The next 19-bits are the first partition size
|
||||
frame[0] |= static_cast<u8>((info.first_part_size & 7u) << 5u);
|
||||
frame[1] = static_cast<u8>((info.first_part_size & 0x7f8u) >> 3u);
|
||||
frame[2] = static_cast<u8>((info.first_part_size & 0x7f800u) >> 11u);
|
||||
frame_scratch[0] |= static_cast<u8>((current_context.first_part_size & 7u) << 5u);
|
||||
frame_scratch[1] = static_cast<u8>((current_context.first_part_size & 0x7f8u) >> 3u);
|
||||
frame_scratch[2] = static_cast<u8>((current_context.first_part_size & 0x7f800u) >> 11u);
|
||||
|
||||
if (is_key_frame) {
|
||||
frame[3] = 0x9du;
|
||||
frame[4] = 0x01u;
|
||||
frame[5] = 0x2au;
|
||||
frame_scratch[3] = 0x9du;
|
||||
frame_scratch[4] = 0x01u;
|
||||
frame_scratch[5] = 0x2au;
|
||||
// TODO(ameerj): Horizontal/Vertical Scale
|
||||
// 16 bits: (2 bits Horizontal Scale << 14) | Width (14 bits)
|
||||
frame[6] = static_cast<u8>(info.frame_width & 0xff);
|
||||
frame[7] = static_cast<u8>(((info.frame_width >> 8) & 0x3f));
|
||||
frame_scratch[6] = static_cast<u8>(current_context.frame_width & 0xff);
|
||||
frame_scratch[7] = static_cast<u8>(((current_context.frame_width >> 8) & 0x3f));
|
||||
// 16 bits:(2 bits Vertical Scale << 14) | Height (14 bits)
|
||||
frame[8] = static_cast<u8>(info.frame_height & 0xff);
|
||||
frame[9] = static_cast<u8>(((info.frame_height >> 8) & 0x3f));
|
||||
frame_scratch[8] = static_cast<u8>(current_context.frame_height & 0xff);
|
||||
frame_scratch[9] = static_cast<u8>(((current_context.frame_height >> 8) & 0x3f));
|
||||
}
|
||||
const u64 bitstream_offset = state.frame_bitstream_offset;
|
||||
host1x.GMMU().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);
|
||||
const u64 bitstream_offset = regs.frame_bitstream_offset.Address();
|
||||
memory_manager.ReadBlock(bitstream_offset, frame_scratch.data() + header_size, bitstream_size);
|
||||
|
||||
return frame;
|
||||
return frame_scratch;
|
||||
}
|
||||
|
||||
} // namespace Tegra::Decoder
|
||||
} // namespace Tegra::Decoders
|
||||
|
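
VP8::ComposeFrame() above prepends the RFC 6386 uncompressed frame tag (3 bytes, plus the start code and 14-bit dimensions on key frames) to the raw bitstream read from guest memory. The standalone sketch below mirrors that packing; the field values are plain parameters here rather than the NVDEC-provided VP8PictureInfo, and only the first 3 bytes are meaningful for inter frames.

```cpp
#include <array>
#include <cstdint>

std::array<std::uint8_t, 10> PackVp8FrameTag(bool key_frame, std::uint8_t version,
                                             std::uint32_t first_part_size,
                                             std::uint16_t width, std::uint16_t height) {
    std::array<std::uint8_t, 10> header{};
    header[0] = key_frame ? 0u : 1u;                                   // 1-bit frame type
    header[0] |= static_cast<std::uint8_t>((version & 7u) << 1u);      // 3-bit version
    header[0] |= static_cast<std::uint8_t>(1u << 4u);                  // show_frame flag
    // 19-bit first partition size, split across bytes 0..2.
    header[0] |= static_cast<std::uint8_t>((first_part_size & 7u) << 5u);
    header[1] = static_cast<std::uint8_t>((first_part_size & 0x7f8u) >> 3u);
    header[2] = static_cast<std::uint8_t>((first_part_size & 0x7f800u) >> 11u);
    if (key_frame) {
        // Start code, then 14-bit width/height (scale bits left at zero, as above).
        header[3] = 0x9d;
        header[4] = 0x01;
        header[5] = 0x2a;
        header[6] = static_cast<std::uint8_t>(width & 0xff);
        header[7] = static_cast<std::uint8_t>((width >> 8) & 0x3f);
        header[8] = static_cast<std::uint8_t>(height & 0xff);
        header[9] = static_cast<std::uint8_t>((height >> 8) & 0x3f);
    }
    return header;
}
```
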
@ -9,6 +9,7 @@
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/scratch_buffer.h"
|
||||
#include "video_core/host1x/codecs/decoder.h"
|
||||
#include "video_core/host1x/nvdec_common.h"
|
||||
|
||||
namespace Tegra {
|
||||
@ -17,20 +18,41 @@ namespace Host1x {
|
||||
class Host1x;
|
||||
} // namespace Host1x
|
||||
|
||||
namespace Decoder {
|
||||
namespace Decoders {
|
||||
enum class Vp8SurfaceIndex : u32 {
|
||||
Last = 0,
|
||||
Golden = 1,
|
||||
AltRef = 2,
|
||||
Current = 3,
|
||||
};
|
||||
|
||||
class VP8 {
|
||||
class VP8 final : public Decoder {
|
||||
public:
|
||||
explicit VP8(Host1x::Host1x& host1x);
|
||||
~VP8();
|
||||
explicit VP8(Host1x::Host1x& host1x, const Host1x::NvdecCommon::NvdecRegisters& regs, s32 id,
|
||||
Host1x::FrameQueue& frame_queue);
|
||||
~VP8() override;
|
||||
|
||||
/// Compose the VP8 frame for FFmpeg decoding
|
||||
[[nodiscard]] std::span<const u8> ComposeFrame(
|
||||
const Host1x::NvdecCommon::NvdecRegisters& state);
|
||||
VP8(const VP8&) = delete;
|
||||
VP8& operator=(const VP8&) = delete;
|
||||
|
||||
VP8(VP8&&) = delete;
|
||||
VP8& operator=(VP8&&) = delete;
|
||||
|
||||
[[nodiscard]] std::span<const u8> ComposeFrame() override;
|
||||
|
||||
std::tuple<u64, u64> GetProgressiveOffsets() override;
|
||||
std::tuple<u64, u64, u64, u64> GetInterlacedOffsets() override;
|
||||
|
||||
bool IsInterlaced() override {
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string_view GetCurrentCodecName() const override {
|
||||
return "VP8";
|
||||
}
|
||||
|
||||
private:
|
||||
Common::ScratchBuffer<u8> frame;
|
||||
Host1x::Host1x& host1x;
|
||||
Common::ScratchBuffer<u8> frame_scratch;
|
||||
|
||||
struct VP8PictureInfo {
|
||||
INSERT_PADDING_WORDS_NOINIT(14);
|
||||
@ -73,7 +95,9 @@ private:
|
||||
INSERT_PADDING_WORDS_NOINIT(3);
|
||||
};
|
||||
static_assert(sizeof(VP8PictureInfo) == 0xc0, "PictureInfo is an invalid size");
|
||||
|
||||
VP8PictureInfo current_context{};
|
||||
};
|
||||
|
||||
} // namespace Decoder
|
||||
} // namespace Decoders
|
||||
} // namespace Tegra
|
||||
|
@ -4,12 +4,13 @@
|
||||
#include <algorithm> // for std::copy
|
||||
#include <numeric>
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/assert.h"
|
||||
#include "video_core/host1x/codecs/vp9.h"
|
||||
#include "video_core/host1x/host1x.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
|
||||
namespace Tegra::Decoder {
|
||||
namespace Tegra::Decoders {
|
||||
namespace {
|
||||
constexpr u32 diff_update_probability = 252;
|
||||
constexpr u32 frame_sync_code = 0x498342;
|
||||
@ -237,7 +238,12 @@ constexpr std::array<u8, 254> map_lut{
|
||||
}
|
||||
} // Anonymous namespace
|
||||
|
||||
VP9::VP9(Host1x::Host1x& host1x_) : host1x{host1x_} {}
|
||||
VP9::VP9(Host1x::Host1x& host1x_, const Host1x::NvdecCommon::NvdecRegisters& regs_, s32 id_,
|
||||
Host1x::FrameQueue& frame_queue_)
|
||||
: Decoder{host1x_, id_, regs_, frame_queue_} {
|
||||
codec = Host1x::NvdecCommon::VideoCodec::VP9;
|
||||
initialized = decode_api.Initialize(codec);
|
||||
}
|
||||
|
||||
VP9::~VP9() = default;
|
||||
|
||||
@ -356,35 +362,113 @@ void VP9::WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_
|
||||
}
|
||||
}
|
||||
|
||||
Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& state) {
|
||||
PictureInfo picture_info;
|
||||
host1x.GMMU().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo));
|
||||
Vp9PictureInfo vp9_info = picture_info.Convert();
|
||||
void VP9::WriteSegmentation(VpxBitStreamWriter& writer) {
|
||||
bool enabled = current_picture_info.segmentation.enabled != 0;
|
||||
writer.WriteBit(enabled);
|
||||
if (!enabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy);
|
||||
auto update_map = current_picture_info.segmentation.update_map != 0;
|
||||
writer.WriteBit(update_map);
|
||||
|
||||
if (update_map) {
|
||||
EntropyProbs entropy_probs{};
|
||||
memory_manager.ReadBlock(regs.vp9_prob_tab_buffer_offset.Address(), &entropy_probs,
|
||||
sizeof(entropy_probs));
|
||||
|
||||
auto WriteProb = [&](u8 prob) {
|
||||
bool coded = prob != 255;
|
||||
writer.WriteBit(coded);
|
||||
if (coded) {
|
||||
writer.WriteU(prob, 8);
|
||||
}
|
||||
};
|
||||
|
||||
for (size_t i = 0; i < entropy_probs.mb_segment_tree_probs.size(); i++) {
|
||||
WriteProb(entropy_probs.mb_segment_tree_probs[i]);
|
||||
}
|
||||
|
||||
auto temporal_update = current_picture_info.segmentation.temporal_update != 0;
|
||||
writer.WriteBit(temporal_update);
|
||||
|
||||
if (temporal_update) {
|
||||
for (s32 i = 0; i < 3; i++) {
|
||||
WriteProb(entropy_probs.segment_pred_probs[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (last_segmentation == current_picture_info.segmentation) {
|
||||
writer.WriteBit(false);
|
||||
return;
|
||||
}
|
||||
|
||||
last_segmentation = current_picture_info.segmentation;
|
||||
writer.WriteBit(true);
|
||||
writer.WriteBit(current_picture_info.segmentation.abs_delta != 0);
|
||||
|
||||
constexpr s32 MAX_SEGMENTS = 8;
|
||||
constexpr std::array SegmentationFeatureBits = {8, 6, 2, 0};
|
||||
|
||||
for (s32 i = 0; i < MAX_SEGMENTS; i++) {
|
||||
auto q_enabled = current_picture_info.segmentation.feature_enabled[i][0] != 0;
|
||||
writer.WriteBit(q_enabled);
|
||||
if (q_enabled) {
|
||||
writer.WriteS(current_picture_info.segmentation.feature_data[i][0],
|
||||
SegmentationFeatureBits[0]);
|
||||
}
|
||||
|
||||
auto lf_enabled = current_picture_info.segmentation.feature_enabled[i][1] != 0;
|
||||
writer.WriteBit(lf_enabled);
|
||||
if (lf_enabled) {
|
||||
writer.WriteS(current_picture_info.segmentation.feature_data[i][1],
|
||||
SegmentationFeatureBits[1]);
|
||||
}
|
||||
|
||||
auto ref_enabled = current_picture_info.segmentation.feature_enabled[i][2] != 0;
|
||||
writer.WriteBit(ref_enabled);
|
||||
if (ref_enabled) {
|
||||
writer.WriteU(current_picture_info.segmentation.feature_data[i][2],
|
||||
SegmentationFeatureBits[2]);
|
||||
}
|
||||
|
||||
auto skip_enabled = current_picture_info.segmentation.feature_enabled[i][3] != 0;
|
||||
writer.WriteBit(skip_enabled);
|
||||
}
|
||||
}
|
||||
|
||||
Vp9PictureInfo VP9::GetVp9PictureInfo() {
|
||||
memory_manager.ReadBlock(regs.picture_info_offset.Address(), ¤t_picture_info,
|
||||
sizeof(PictureInfo));
|
||||
Vp9PictureInfo vp9_info = current_picture_info.Convert();
|
||||
|
||||
InsertEntropy(regs.vp9_prob_tab_buffer_offset.Address(), vp9_info.entropy);
|
||||
|
||||
// surface_luma_offset[0:3] contains the address of the reference frame offsets in the following
|
||||
// order: last, golden, altref, current.
|
||||
std::copy(state.surface_luma_offset.begin(), state.surface_luma_offset.begin() + 4,
|
||||
vp9_info.frame_offsets.begin());
|
||||
for (size_t i = 0; i < 4; i++) {
|
||||
vp9_info.frame_offsets[i] = regs.surface_luma_offsets[i].Address();
|
||||
}
|
||||
|
||||
return vp9_info;
|
||||
}
|
||||
|
||||
void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) {
|
||||
EntropyProbs entropy;
|
||||
host1x.GMMU().ReadBlock(offset, &entropy, sizeof(EntropyProbs));
|
||||
memory_manager.ReadBlock(offset, &entropy, sizeof(EntropyProbs));
|
||||
entropy.Convert(dst);
|
||||
}
|
||||
|
||||
Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
|
||||
Vp9FrameContainer VP9::GetCurrentFrame() {
|
||||
Vp9FrameContainer current_frame{};
|
||||
{
|
||||
// gpu.SyncGuestHost(); epic, why?
|
||||
current_frame.info = GetVp9PictureInfo(state);
|
||||
current_frame.info = GetVp9PictureInfo();
|
||||
current_frame.bit_stream.resize(current_frame.info.bitstream_size);
|
||||
host1x.GMMU().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(),
|
||||
current_frame.info.bitstream_size);
|
||||
memory_manager.ReadBlock(regs.frame_bitstream_offset.Address(),
|
||||
current_frame.bit_stream.data(),
|
||||
current_frame.info.bitstream_size);
|
||||
}
|
||||
if (!next_frame.bit_stream.empty()) {
|
||||
Vp9FrameContainer temp{
|
||||
@ -742,8 +826,7 @@ VpxBitStreamWriter VP9::ComposeUncompressedHeader() {
|
||||
uncomp_writer.WriteDeltaQ(current_frame_info.uv_dc_delta_q);
|
||||
uncomp_writer.WriteDeltaQ(current_frame_info.uv_ac_delta_q);
|
||||
|
||||
ASSERT(!current_frame_info.segment_enabled);
|
||||
uncomp_writer.WriteBit(false); // Segmentation enabled (TODO).
|
||||
WriteSegmentation(uncomp_writer);
|
||||
|
||||
const s32 min_tile_cols_log2 = CalcMinLog2TileCols(current_frame_info.frame_size.width);
|
||||
const s32 max_tile_cols_log2 = CalcMaxLog2TileCols(current_frame_info.frame_size.width);
|
||||
@ -770,10 +853,29 @@ VpxBitStreamWriter VP9::ComposeUncompressedHeader() {
|
||||
return uncomp_writer;
|
||||
}
|
||||
|
||||
void VP9::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
|
||||
std::tuple<u64, u64> VP9::GetProgressiveOffsets() {
|
||||
auto luma{regs.surface_luma_offsets[static_cast<u32>(Vp9SurfaceIndex::Current)].Address()};
|
||||
auto chroma{regs.surface_chroma_offsets[static_cast<u32>(Vp9SurfaceIndex::Current)].Address()};
|
||||
return {luma, chroma};
|
||||
}
|
||||
|
||||
std::tuple<u64, u64, u64, u64> VP9::GetInterlacedOffsets() {
|
||||
auto luma_top{regs.surface_luma_offsets[static_cast<u32>(Vp9SurfaceIndex::Current)].Address()};
|
||||
auto luma_bottom{
|
||||
regs.surface_luma_offsets[static_cast<u32>(Vp9SurfaceIndex::Current)].Address()};
|
||||
auto chroma_top{
|
||||
regs.surface_chroma_offsets[static_cast<u32>(Vp9SurfaceIndex::Current)].Address()};
|
||||
auto chroma_bottom{
|
||||
regs.surface_chroma_offsets[static_cast<u32>(Vp9SurfaceIndex::Current)].Address()};
|
||||
return {luma_top, luma_bottom, chroma_top, chroma_bottom};
|
||||
}
|
||||
|
||||
std::span<const u8> VP9::ComposeFrame() {
|
||||
vp9_hidden_frame = false;
|
||||
|
||||
std::vector<u8> bitstream;
|
||||
{
|
||||
Vp9FrameContainer curr_frame = GetCurrentFrame(state);
|
||||
Vp9FrameContainer curr_frame = GetCurrentFrame();
|
||||
current_frame_info = curr_frame.info;
|
||||
bitstream = std::move(curr_frame.bit_stream);
|
||||
}
|
||||
@ -786,12 +888,16 @@ void VP9::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
|
||||
std::vector<u8> uncompressed_header = uncomp_writer.GetByteArray();
|
||||
|
||||
// Write headers and frame to buffer
|
||||
frame.resize(uncompressed_header.size() + compressed_header.size() + bitstream.size());
|
||||
std::copy(uncompressed_header.begin(), uncompressed_header.end(), frame.begin());
|
||||
frame_scratch.resize(uncompressed_header.size() + compressed_header.size() + bitstream.size());
|
||||
std::copy(uncompressed_header.begin(), uncompressed_header.end(), frame_scratch.begin());
|
||||
std::copy(compressed_header.begin(), compressed_header.end(),
|
||||
frame.begin() + uncompressed_header.size());
|
||||
frame_scratch.begin() + uncompressed_header.size());
|
||||
std::copy(bitstream.begin(), bitstream.end(),
|
||||
frame.begin() + uncompressed_header.size() + compressed_header.size());
|
||||
frame_scratch.begin() + uncompressed_header.size() + compressed_header.size());
|
||||
|
||||
vp9_hidden_frame = WasFrameHidden();
|
||||
|
||||
return GetFrameBytes();
|
||||
}
|
||||
|
||||
VpxRangeEncoder::VpxRangeEncoder() {
|
||||
@ -944,4 +1050,4 @@ const std::vector<u8>& VpxBitStreamWriter::GetByteArray() const {
|
||||
return byte_array;
|
||||
}
|
||||
|
||||
} // namespace Tegra::Decoder
|
||||
} // namespace Tegra::Decoders
|
||||
|
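
The new WriteSegmentation() above fills in the segmentation block of the VP9 uncompressed header instead of always writing "segmentation disabled". Its WriteProb helper follows the standard VP9 pattern: a one-bit coded flag, then 8 literal bits only when the probability is not the implicit 255. A minimal standalone version of that pattern; BitWriter here is a stand-in for yuzu's VpxBitStreamWriter.

```cpp
#include <array>
#include <cstdint>
#include <vector>

struct BitWriter {
    std::vector<int> bits;
    void WriteBit(bool b) { bits.push_back(b ? 1 : 0); }
    void WriteLiteral(std::uint32_t value, int count) {
        for (int i = count - 1; i >= 0; --i) {
            bits.push_back((value >> i) & 1);
        }
    }
};

void WriteSegmentationProbs(BitWriter& writer, const std::array<std::uint8_t, 7>& tree_probs) {
    for (const std::uint8_t prob : tree_probs) {
        const bool coded = prob != 255; // 255 means "use the default", so nothing more is sent
        writer.WriteBit(coded);
        if (coded) {
            writer.WriteLiteral(prob, 8);
        }
    }
}
```
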
@ -10,6 +10,7 @@
|
||||
#include "common/common_types.h"
|
||||
#include "common/scratch_buffer.h"
|
||||
#include "common/stream.h"
|
||||
#include "video_core/host1x/codecs/decoder.h"
|
||||
#include "video_core/host1x/codecs/vp9_types.h"
|
||||
#include "video_core/host1x/nvdec_common.h"
|
||||
|
||||
@ -19,7 +20,7 @@ namespace Host1x {
|
||||
class Host1x;
|
||||
} // namespace Host1x
|
||||
|
||||
namespace Decoder {
|
||||
namespace Decoders {
|
||||
|
||||
/// The VpxRangeEncoder, and VpxBitStreamWriter classes are used to compose the
|
||||
/// VP9 header bitstreams.
|
||||
@ -110,21 +111,32 @@ private:
|
||||
std::vector<u8> byte_array;
|
||||
};
|
||||
|
||||
class VP9 {
|
||||
class VP9 final : public Decoder {
|
||||
public:
|
||||
explicit VP9(Host1x::Host1x& host1x);
|
||||
~VP9();
|
||||
explicit VP9(Host1x::Host1x& host1x, const Host1x::NvdecCommon::NvdecRegisters& regs, s32 id,
|
||||
Host1x::FrameQueue& frame_queue);
|
||||
~VP9() override;
|
||||
|
||||
VP9(const VP9&) = delete;
|
||||
VP9& operator=(const VP9&) = delete;
|
||||
|
||||
VP9(VP9&&) = default;
|
||||
VP9(VP9&&) = delete;
|
||||
VP9& operator=(VP9&&) = delete;
|
||||
|
||||
/// Composes the VP9 frame from the GPU state information.
|
||||
/// Based on the official VP9 spec documentation
|
||||
void ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state);
|
||||
[[nodiscard]] std::span<const u8> ComposeFrame() override;
|
||||
|
||||
std::tuple<u64, u64> GetProgressiveOffsets() override;
|
||||
std::tuple<u64, u64, u64, u64> GetInterlacedOffsets() override;
|
||||
|
||||
bool IsInterlaced() override {
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string_view GetCurrentCodecName() const override {
|
||||
return "VP9";
|
||||
}
|
||||
|
||||
private:
|
||||
/// Returns true if the most recent frame was a hidden frame.
|
||||
[[nodiscard]] bool WasFrameHidden() const {
|
||||
return !current_frame_info.show_frame;
|
||||
@ -132,10 +144,9 @@ public:
|
||||
|
||||
/// Returns a const span to the composed frame data.
|
||||
[[nodiscard]] std::span<const u8> GetFrameBytes() const {
|
||||
return frame;
|
||||
return frame_scratch;
|
||||
}
|
||||
|
||||
private:
|
||||
/// Generates compressed header probability updates in the bitstream writer
|
||||
template <typename T, std::size_t N>
|
||||
void WriteProbabilityUpdate(VpxRangeEncoder& writer, const std::array<T, N>& new_prob,
|
||||
@ -167,23 +178,22 @@ private:
|
||||
/// Write motion vector probability updates. 6.3.17 in the spec
|
||||
void WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);
|
||||
|
||||
void WriteSegmentation(VpxBitStreamWriter& writer);
|
||||
|
||||
/// Returns VP9 information from NVDEC provided offset and size
|
||||
[[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(
|
||||
const Host1x::NvdecCommon::NvdecRegisters& state);
|
||||
[[nodiscard]] Vp9PictureInfo GetVp9PictureInfo();
|
||||
|
||||
/// Read and convert NVDEC provided entropy probs to Vp9EntropyProbs struct
|
||||
void InsertEntropy(u64 offset, Vp9EntropyProbs& dst);
|
||||
|
||||
/// Returns frame to be decoded after buffering
|
||||
[[nodiscard]] Vp9FrameContainer GetCurrentFrame(
|
||||
const Host1x::NvdecCommon::NvdecRegisters& state);
|
||||
[[nodiscard]] Vp9FrameContainer GetCurrentFrame();
|
||||
|
||||
/// Use NVDEC providied information to compose the headers for the current frame
|
||||
[[nodiscard]] std::vector<u8> ComposeCompressedHeader();
|
||||
[[nodiscard]] VpxBitStreamWriter ComposeUncompressedHeader();
|
||||
|
||||
Host1x::Host1x& host1x;
|
||||
Common::ScratchBuffer<u8> frame;
|
||||
Common::ScratchBuffer<u8> frame_scratch;
|
||||
|
||||
std::array<s8, 4> loop_filter_ref_deltas{};
|
||||
std::array<s8, 2> loop_filter_mode_deltas{};
|
||||
@ -192,9 +202,11 @@ private:
|
||||
std::array<Vp9EntropyProbs, 4> frame_ctxs{};
|
||||
bool swap_ref_indices{};
|
||||
|
||||
Segmentation last_segmentation{};
|
||||
PictureInfo current_picture_info{};
|
||||
Vp9PictureInfo current_frame_info{};
|
||||
Vp9EntropyProbs prev_frame_probs{};
|
||||
};
|
||||
|
||||
} // namespace Decoder
|
||||
} // namespace Decoders
|
||||
} // namespace Tegra
|
||||
|
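
As the VP9 path above shows, ComposeFrame() now returns one contiguous buffer laid out as uncompressed header, then compressed header, then the raw tile data, which is the layout the software decoder expects for a single frame; hidden (non-shown) frames are composed but never queued. A small sketch of that assembly order, using plain vectors instead of the ScratchBuffer:

```cpp
#include <cstdint>
#include <vector>

std::vector<std::uint8_t> AssembleVp9Frame(const std::vector<std::uint8_t>& uncompressed_header,
                                           const std::vector<std::uint8_t>& compressed_header,
                                           const std::vector<std::uint8_t>& tile_data) {
    std::vector<std::uint8_t> frame;
    frame.reserve(uncompressed_header.size() + compressed_header.size() + tile_data.size());
    frame.insert(frame.end(), uncompressed_header.begin(), uncompressed_header.end());
    frame.insert(frame.end(), compressed_header.begin(), compressed_header.end());
    frame.insert(frame.end(), tile_data.begin(), tile_data.end());
    return frame;
}
```
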
@ -11,7 +11,14 @@

namespace Tegra {

namespace Decoder {
namespace Decoders {
enum class Vp9SurfaceIndex : u32 {
    Last = 0,
    Golden = 1,
    AltRef = 2,
    Current = 3,
};

struct Vp9FrameDimensions {
    s16 width;
    s16 height;
@ -48,11 +55,13 @@ enum class TxMode {
};

struct Segmentation {
    constexpr bool operator==(const Segmentation& rhs) const = default;

    u8 enabled;
    u8 update_map;
    u8 temporal_update;
    u8 abs_delta;
    std::array<u32, 8> feature_mask;
    std::array<std::array<u8, 4>, 8> feature_enabled;
    std::array<std::array<s16, 4>, 8> feature_data;
};
static_assert(sizeof(Segmentation) == 0x64, "Segmentation is an invalid size");
@ -190,7 +199,17 @@
static_assert(sizeof(PictureInfo) == 0x100, "PictureInfo is an invalid size");

struct EntropyProbs {
    INSERT_PADDING_BYTES_NOINIT(1024);          ///< 0x0000
    std::array<u8, 10 * 10 * 8> kf_bmode_prob;  ///< 0x0000
    std::array<u8, 10 * 10 * 1> kf_bmode_probB; ///< 0x0320
    std::array<u8, 3> ref_pred_probs;           ///< 0x0384
    std::array<u8, 7> mb_segment_tree_probs;    ///< 0x0387
    std::array<u8, 3> segment_pred_probs;       ///< 0x038E
    std::array<u8, 4> ref_scores;               ///< 0x0391
    std::array<u8, 2> prob_comppred;            ///< 0x0395
    INSERT_PADDING_BYTES_NOINIT(9);             ///< 0x0397
    std::array<u8, 10 * 8> kf_uv_mode_prob;     ///< 0x03A0
    std::array<u8, 10 * 1> kf_uv_mode_probB;    ///< 0x03F0
    INSERT_PADDING_BYTES_NOINIT(6);             ///< 0x03FA
    std::array<u8, 28> inter_mode_prob;         ///< 0x0400
    std::array<u8, 4> intra_inter_prob;         ///< 0x041C
    INSERT_PADDING_BYTES_NOINIT(80);            ///< 0x0420
@ -302,5 +321,5 @@ ASSERT_POSITION(class_0_fr, 0x560);
ASSERT_POSITION(coef_probs, 0x5A0);
#undef ASSERT_POSITION

}; // namespace Decoder
}; // namespace Decoders
}; // namespace Tegra
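
These NVDEC structures are mirrored byte-for-byte from guest memory, so the headers pin both total sizes and field offsets with static_assert/ASSERT_POSITION. A generic illustration of that pattern (ExamplePictureInfo is made up; the real checks are in the headers above):

```cpp
#include <cstddef>
#include <cstdint>

struct ExamplePictureInfo {
    std::uint32_t stream_len;  // expected at offset 0x0
    std::uint32_t slice_count; // expected at offset 0x4
    std::uint64_t bitstream;   // expected at offset 0x8
};

static_assert(sizeof(ExamplePictureInfo) == 0x10, "ExamplePictureInfo is an invalid size");

#define ASSERT_POSITION(field_name, position)                                                      \
    static_assert(offsetof(ExamplePictureInfo, field_name) == position,                            \
                  "Field " #field_name " has invalid position")

ASSERT_POSITION(stream_len, 0x0);
ASSERT_POSITION(slice_count, 0x4);
ASSERT_POSITION(bitstream, 0x8);
#undef ASSERT_POSITION
```
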
@ -27,6 +27,7 @@ void Control::ProcessMethod(Method method, u32 argument) {
}

void Control::Execute(u32 data) {
    LOG_TRACE(Service_NVDRV, "Control wait syncpt {} value {}", data, syncpoint_value);
    host1x.GetSyncpointManager().WaitHost(data, syncpoint_value);
}

@ -6,9 +6,7 @@

#include "common/common_types.h"

namespace Tegra {

namespace Host1x {
namespace Tegra::Host1x {

class Host1x;
class Nvdec;
@ -31,10 +29,8 @@ private:
    /// For Host1x, execute is waiting on a syncpoint previously written into the state
    void Execute(u32 data);

    u32 syncpoint_value{};
    Host1x& host1x;
    u32 syncpoint_value{};
};

} // namespace Host1x

} // namespace Tegra
} // namespace Tegra::Host1x
@ -5,7 +5,9 @@
|
||||
#include "common/logging/log.h"
|
||||
#include "common/scope_exit.h"
|
||||
#include "common/settings.h"
|
||||
#include "core/memory.h"
|
||||
#include "video_core/host1x/ffmpeg/ffmpeg.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
|
||||
extern "C" {
|
||||
#ifdef LIBVA_FOUND
|
||||
@ -149,6 +151,7 @@ bool HardwareContext::InitializeForDecoder(DecoderContext& decoder_context,
|
||||
}
|
||||
}
|
||||
|
||||
LOG_INFO(HW_GPU, "Hardware decoding is disabled due to implementation issues, using CPU.");
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -183,8 +186,8 @@ bool HardwareContext::InitializeWithType(AVHWDeviceType type) {
|
||||
return true;
|
||||
}
|
||||
|
||||
DecoderContext::DecoderContext(const Decoder& decoder) {
|
||||
m_codec_context = avcodec_alloc_context3(decoder.GetCodec());
|
||||
DecoderContext::DecoderContext(const Decoder& decoder) : m_decoder{decoder} {
|
||||
m_codec_context = avcodec_alloc_context3(m_decoder.GetCodec());
|
||||
av_opt_set(m_codec_context->priv_data, "tune", "zerolatency", 0);
|
||||
m_codec_context->thread_count = 0;
|
||||
m_codec_context->thread_type &= ~FF_THREAD_FRAME;
|
||||
@ -216,6 +219,25 @@ bool DecoderContext::OpenContext(const Decoder& decoder) {
|
||||
}
|
||||
|
||||
bool DecoderContext::SendPacket(const Packet& packet) {
|
||||
m_temp_frame = std::make_shared<Frame>();
|
||||
m_got_frame = 0;
|
||||
|
||||
// Android can randomly crash when calling decode directly, so skip.
|
||||
// TODO update ffmpeg and hope that fixes it.
|
||||
#ifndef ANDROID
|
||||
if (!m_codec_context->hw_device_ctx && m_codec_context->codec_id == AV_CODEC_ID_H264) {
|
||||
m_decode_order = true;
|
||||
auto* codec{ffcodec(m_decoder.GetCodec())};
|
||||
if (const int ret = codec->cb.decode(m_codec_context, m_temp_frame->GetFrame(),
|
||||
&m_got_frame, packet.GetPacket());
|
||||
ret < 0) {
|
||||
LOG_DEBUG(Service_NVDRV, "avcodec_send_packet error {}", AVError(ret));
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (const int ret = avcodec_send_packet(m_codec_context, packet.GetPacket()); ret < 0) {
|
||||
LOG_ERROR(HW_GPU, "avcodec_send_packet error: {}", AVError(ret));
|
||||
return false;
|
||||
@ -224,139 +246,73 @@ bool DecoderContext::SendPacket(const Packet& packet) {
|
||||
return true;
|
||||
}
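
On non-Android builds, SendPacket() above bypasses avcodec_send_packet for software-decoded H.264 and invokes the codec's decode callback directly, which yields frames in decode order; every other configuration falls back to the regular FFmpeg send/receive pair. For reference, a minimal version of that standard loop (public libavcodec API only, error handling reduced to a boolean):

```cpp
extern "C" {
#include <libavcodec/avcodec.h>
}

bool DecodeOnePacket(AVCodecContext* ctx, const AVPacket* packet, AVFrame* out_frame) {
    if (avcodec_send_packet(ctx, packet) < 0) {
        return false;
    }
    // One packet may yield zero frames (the decoder is buffering) or, in principle, several;
    // callers normally loop on avcodec_receive_frame until it returns AVERROR(EAGAIN).
    const int ret = avcodec_receive_frame(ctx, out_frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
        return false;
    }
    return ret >= 0;
}
```
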
|
||||
|
||||
std::unique_ptr<Frame> DecoderContext::ReceiveFrame(bool* out_is_interlaced) {
|
||||
auto dst_frame = std::make_unique<Frame>();
|
||||
std::shared_ptr<Frame> DecoderContext::ReceiveFrame() {
|
||||
// Android can randomly crash when calling decode directly, so skip.
|
||||
// TODO update ffmpeg and hope that fixes it.
|
||||
#ifndef ANDROID
|
||||
if (!m_codec_context->hw_device_ctx && m_codec_context->codec_id == AV_CODEC_ID_H264) {
|
||||
m_decode_order = true;
|
||||
auto* codec{ffcodec(m_decoder.GetCodec())};
|
||||
int ret{0};
|
||||
|
||||
const auto ReceiveImpl = [&](AVFrame* frame) {
|
||||
if (const int ret = avcodec_receive_frame(m_codec_context, frame); ret < 0) {
|
||||
LOG_ERROR(HW_GPU, "avcodec_receive_frame error: {}", AVError(ret));
|
||||
return false;
|
||||
if (m_got_frame == 0) {
|
||||
Packet packet{{}};
|
||||
auto* pkt = packet.GetPacket();
|
||||
pkt->data = nullptr;
|
||||
pkt->size = 0;
|
||||
ret = codec->cb.decode(m_codec_context, m_temp_frame->GetFrame(), &m_got_frame, pkt);
|
||||
m_codec_context->has_b_frames = 0;
|
||||
}
|
||||
|
||||
*out_is_interlaced =
|
||||
#if defined(FF_API_INTERLACED_FRAME) || LIBAVUTIL_VERSION_MAJOR >= 59
|
||||
(frame->flags & AV_FRAME_FLAG_INTERLACED) != 0;
|
||||
#else
|
||||
frame->interlaced_frame != 0;
|
||||
if (m_got_frame == 0 || ret < 0) {
|
||||
LOG_ERROR(Service_NVDRV, "Failed to receive a frame! error {}", ret);
|
||||
return {};
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
return true;
|
||||
};
|
||||
{
|
||||
|
||||
if (m_codec_context->hw_device_ctx) {
|
||||
// If we have a hardware context, make a separate frame here to receive the
|
||||
// hardware result before sending it to the output.
|
||||
Frame intermediate_frame;
|
||||
const auto ReceiveImpl = [&](AVFrame* frame) {
|
||||
if (const int ret = avcodec_receive_frame(m_codec_context, frame); ret < 0) {
|
||||
LOG_ERROR(HW_GPU, "avcodec_receive_frame error: {}", AVError(ret));
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!ReceiveImpl(intermediate_frame.GetFrame())) {
|
||||
return {};
|
||||
}
|
||||
return true;
|
||||
};
|
||||
|
||||
dst_frame->SetFormat(PreferredGpuFormat);
|
||||
if (const int ret =
|
||||
av_hwframe_transfer_data(dst_frame->GetFrame(), intermediate_frame.GetFrame(), 0);
|
||||
ret < 0) {
|
||||
LOG_ERROR(HW_GPU, "av_hwframe_transfer_data error: {}", AVError(ret));
|
||||
return {};
|
||||
}
|
||||
} else {
|
||||
// Otherwise, decode the frame as normal.
|
||||
if (!ReceiveImpl(dst_frame->GetFrame())) {
|
||||
return {};
|
||||
if (m_codec_context->hw_device_ctx) {
|
||||
// If we have a hardware context, make a separate frame here to receive the
|
||||
// hardware result before sending it to the output.
|
||||
Frame intermediate_frame;
|
||||
|
||||
if (!ReceiveImpl(intermediate_frame.GetFrame())) {
|
||||
return {};
|
||||
}
|
||||
|
||||
m_temp_frame->SetFormat(PreferredGpuFormat);
|
||||
if (const int ret = av_hwframe_transfer_data(m_temp_frame->GetFrame(),
|
||||
intermediate_frame.GetFrame(), 0);
|
||||
ret < 0) {
|
||||
LOG_ERROR(HW_GPU, "av_hwframe_transfer_data error: {}", AVError(ret));
|
||||
return {};
|
||||
}
|
||||
} else {
|
||||
// Otherwise, decode the frame as normal.
|
||||
if (!ReceiveImpl(m_temp_frame->GetFrame())) {
|
||||
return {};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dst_frame;
|
||||
}
|
||||
|
||||
DeinterlaceFilter::DeinterlaceFilter(const Frame& frame) {
|
||||
const AVFilter* buffer_src = avfilter_get_by_name("buffer");
|
||||
const AVFilter* buffer_sink = avfilter_get_by_name("buffersink");
|
||||
AVFilterInOut* inputs = avfilter_inout_alloc();
|
||||
AVFilterInOut* outputs = avfilter_inout_alloc();
|
||||
SCOPE_EXIT({
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
});
|
||||
|
||||
// Don't know how to get the accurate time_base but it doesn't matter for yadif filter
|
||||
// so just use 1/1 to make buffer filter happy
|
||||
std::string args = fmt::format("video_size={}x{}:pix_fmt={}:time_base=1/1", frame.GetWidth(),
|
||||
frame.GetHeight(), static_cast<int>(frame.GetPixelFormat()));
|
||||
|
||||
m_filter_graph = avfilter_graph_alloc();
|
||||
int ret = avfilter_graph_create_filter(&m_source_context, buffer_src, "in", args.c_str(),
|
||||
nullptr, m_filter_graph);
|
||||
if (ret < 0) {
|
||||
LOG_ERROR(HW_GPU, "avfilter_graph_create_filter source error: {}", AVError(ret));
|
||||
return;
|
||||
}
|
||||
|
||||
ret = avfilter_graph_create_filter(&m_sink_context, buffer_sink, "out", nullptr, nullptr,
|
||||
m_filter_graph);
|
||||
if (ret < 0) {
|
||||
LOG_ERROR(HW_GPU, "avfilter_graph_create_filter sink error: {}", AVError(ret));
|
||||
return;
|
||||
}
|
||||
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = m_sink_context;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = nullptr;
|
||||
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = m_source_context;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = nullptr;
|
||||
|
||||
const char* description = "yadif=1:-1:0";
|
||||
ret = avfilter_graph_parse_ptr(m_filter_graph, description, &inputs, &outputs, nullptr);
|
||||
if (ret < 0) {
|
||||
LOG_ERROR(HW_GPU, "avfilter_graph_parse_ptr error: {}", AVError(ret));
|
||||
return;
|
||||
}
|
||||
|
||||
ret = avfilter_graph_config(m_filter_graph, nullptr);
|
||||
if (ret < 0) {
|
||||
LOG_ERROR(HW_GPU, "avfilter_graph_config error: {}", AVError(ret));
|
||||
return;
|
||||
}
|
||||
|
||||
m_initialized = true;
|
||||
}
|
||||
|
||||
bool DeinterlaceFilter::AddSourceFrame(const Frame& frame) {
|
||||
if (const int ret = av_buffersrc_add_frame_flags(m_source_context, frame.GetFrame(),
|
||||
AV_BUFFERSRC_FLAG_KEEP_REF);
|
||||
ret < 0) {
|
||||
LOG_ERROR(HW_GPU, "av_buffersrc_add_frame_flags error: {}", AVError(ret));
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
std::unique_ptr<Frame> DeinterlaceFilter::DrainSinkFrame() {
|
||||
auto dst_frame = std::make_unique<Frame>();
|
||||
const int ret = av_buffersink_get_frame(m_sink_context, dst_frame->GetFrame());
|
||||
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR(AVERROR_EOF)) {
|
||||
return {};
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
LOG_ERROR(HW_GPU, "av_buffersink_get_frame error: {}", AVError(ret));
|
||||
return {};
|
||||
}
|
||||
|
||||
return dst_frame;
|
||||
}
|
||||
|
||||
DeinterlaceFilter::~DeinterlaceFilter() {
|
||||
avfilter_graph_free(&m_filter_graph);
|
||||
#if defined(FF_API_INTERLACED_FRAME) || LIBAVUTIL_VERSION_MAJOR >= 59
|
||||
m_temp_frame->GetFrame()->interlaced_frame =
|
||||
(m_temp_frame->GetFrame()->flags & AV_FRAME_FLAG_INTERLACED) != 0;
|
||||
#endif
|
||||
return std::move(m_temp_frame);
|
||||
}
|
||||
|
||||
void DecodeApi::Reset() {
|
||||
m_deinterlace_filter.reset();
|
||||
m_hardware_context.reset();
|
||||
m_decoder_context.reset();
|
||||
m_decoder.reset();
|
||||
@ -382,43 +338,14 @@ bool DecodeApi::Initialize(Tegra::Host1x::NvdecCommon::VideoCodec codec) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DecodeApi::SendPacket(std::span<const u8> packet_data, size_t configuration_size) {
|
||||
bool DecodeApi::SendPacket(std::span<const u8> packet_data) {
|
||||
FFmpeg::Packet packet(packet_data);
|
||||
return m_decoder_context->SendPacket(packet);
|
||||
}
|
||||
|
||||
void DecodeApi::ReceiveFrames(std::queue<std::unique_ptr<Frame>>& frame_queue) {
|
||||
std::shared_ptr<Frame> DecodeApi::ReceiveFrame() {
|
||||
// Receive raw frame from decoder.
|
||||
bool is_interlaced;
|
||||
auto frame = m_decoder_context->ReceiveFrame(&is_interlaced);
|
||||
if (!frame) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!is_interlaced) {
|
||||
// If the frame is not interlaced, we can pend it now.
|
||||
frame_queue.push(std::move(frame));
|
||||
} else {
|
||||
// Create the deinterlacer if needed.
|
||||
if (!m_deinterlace_filter) {
|
||||
m_deinterlace_filter.emplace(*frame);
|
||||
}
|
||||
|
||||
// Add the frame we just received.
|
||||
if (!m_deinterlace_filter->AddSourceFrame(*frame)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Pend output fields.
|
||||
while (true) {
|
||||
auto filter_frame = m_deinterlace_filter->DrainSinkFrame();
|
||||
if (!filter_frame) {
|
||||
break;
|
||||
}
|
||||
|
||||
frame_queue.push(std::move(filter_frame));
|
||||
}
|
||||
}
|
||||
return m_decoder_context->ReceiveFrame();
|
||||
}
|
||||
|
||||
} // namespace FFmpeg
|
||||
|
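The receive path above pulls a frame from the codec context and, when a hardware device context is active, copies the GPU surface back to system memory with av_hwframe_transfer_data before handing it on. A minimal standalone sketch of that pattern, assuming a codec context that has already been fed packets (the function name and the preferred_format parameter are illustrative, not taken from this diff):

```cpp
// Sketch of the hardware receive pattern used above. Assumptions: codec_ctx
// already has packets queued; preferred_format stands in for yuzu's
// PreferredGpuFormat; error handling is trimmed to the essentials.
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>
}

AVFrame* ReceiveToSystemMemory(AVCodecContext* codec_ctx, AVPixelFormat preferred_format) {
    AVFrame* decoded = av_frame_alloc();
    if (avcodec_receive_frame(codec_ctx, decoded) < 0) {
        av_frame_free(&decoded);
        return nullptr;
    }
    if (!codec_ctx->hw_device_ctx) {
        // Software decode: the frame is already in system memory.
        return decoded;
    }
    // Hardware decode: transfer the GPU surface into a host-visible frame.
    AVFrame* transferred = av_frame_alloc();
    transferred->format = preferred_format;
    if (av_hwframe_transfer_data(transferred, decoded, 0) < 0) {
        av_frame_free(&transferred); // sets transferred to nullptr on failure
    }
    av_frame_free(&decoded);
    return transferred; // nullptr if the transfer failed
}
```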
@@ -20,17 +20,20 @@ extern "C" {
#endif

#include <libavcodec/avcodec.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#ifndef ANDROID
#include <libavcodec/codec_internal.h>
#endif

#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
}

namespace Tegra {
class MemoryManager;
}

namespace FFmpeg {

class Packet;
@@ -90,6 +93,10 @@ public:
return m_frame->data[plane];
}

const u8* GetPlane(int plane) const {
return m_frame->data[plane];
}

u8** GetPlanes() const {
return m_frame->data;
}
@@ -98,6 +105,14 @@ public:
m_frame->format = format;
}

bool IsInterlaced() const {
return m_frame->interlaced_frame != 0;
}

bool IsHardwareDecoded() const {
return m_frame->hw_frames_ctx != nullptr;
}

AVFrame* GetFrame() const {
return m_frame;
}
@@ -160,33 +175,22 @@ public:
void InitializeHardwareDecoder(const HardwareContext& context, AVPixelFormat hw_pix_fmt);
bool OpenContext(const Decoder& decoder);
bool SendPacket(const Packet& packet);
std::unique_ptr<Frame> ReceiveFrame(bool* out_is_interlaced);
std::shared_ptr<Frame> ReceiveFrame();

AVCodecContext* GetCodecContext() const {
return m_codec_context;
}

bool UsingDecodeOrder() const {
return m_decode_order;
}

private:
const Decoder& m_decoder;
AVCodecContext* m_codec_context{};
};

// Wraps an AVFilterGraph.
class DeinterlaceFilter {
public:
YUZU_NON_COPYABLE(DeinterlaceFilter);
YUZU_NON_MOVEABLE(DeinterlaceFilter);

explicit DeinterlaceFilter(const Frame& frame);
~DeinterlaceFilter();

bool AddSourceFrame(const Frame& frame);
std::unique_ptr<Frame> DrainSinkFrame();

private:
AVFilterGraph* m_filter_graph{};
AVFilterContext* m_source_context{};
AVFilterContext* m_sink_context{};
bool m_initialized{};
s32 m_got_frame{};
std::shared_ptr<Frame> m_temp_frame{};
bool m_decode_order{};
};

class DecodeApi {
@@ -200,14 +204,17 @@ public:
bool Initialize(Tegra::Host1x::NvdecCommon::VideoCodec codec);
void Reset();

bool SendPacket(std::span<const u8> packet_data, size_t configuration_size);
void ReceiveFrames(std::queue<std::unique_ptr<Frame>>& frame_queue);
bool UsingDecodeOrder() const {
return m_decoder_context->UsingDecodeOrder();
}

bool SendPacket(std::span<const u8> packet_data);
std::shared_ptr<Frame> ReceiveFrame();

private:
std::optional<FFmpeg::Decoder> m_decoder;
std::optional<FFmpeg::DecoderContext> m_decoder_context;
std::optional<FFmpeg::HardwareContext> m_hardware_context;
std::optional<FFmpeg::DeinterlaceFilter> m_deinterlace_filter;
};

} // namespace FFmpeg
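With the header changes above, DecodeApi shrinks to a packet-in, frame-out interface: deinterlacing no longer happens inside it, and interlacing is reported per frame via Frame::IsInterlaced(). A hedged usage sketch against those declarations (the surrounding loop and names are illustrative, not part of the diff):

```cpp
// Illustrative caller of the new DecodeApi surface. Assumptions: the yuzu
// headers declared above are included, `api` is an initialized
// FFmpeg::DecodeApi, and `bitstream` holds one submitted packet.
#include <memory>
#include <span>
#include <vector>

void DecodeOnePacket(FFmpeg::DecodeApi& api, std::span<const u8> bitstream,
                     std::vector<std::shared_ptr<FFmpeg::Frame>>& out_frames) {
    if (!api.SendPacket(bitstream)) {
        return; // the decoder rejected the packet
    }
    // Drain every frame the decoder has ready; ReceiveFrame() returns an
    // empty pointer once nothing is pending.
    while (auto frame = api.ReceiveFrame()) {
        if (frame->IsInterlaced()) {
            // A separate deinterlacing pass (e.g. on the VIC side) would run here.
        }
        out_frames.push_back(std::move(frame));
    }
}
```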
@@ -3,10 +3,10 @@

#include "core/core.h"
#include "video_core/host1x/host1x.h"
#include "video_core/host1x/nvdec.h"
#include "video_core/host1x/vic.h"

namespace Tegra {

namespace Host1x {
namespace Tegra::Host1x {

Host1x::Host1x(Core::System& system_)
: system{system_}, syncpoint_manager{},
@@ -15,6 +15,22 @@ Host1x::Host1x(Core::System& system_)

Host1x::~Host1x() = default;

} // namespace Host1x
void Host1x::StartDevice(s32 fd, ChannelType type, u32 syncpt) {
switch (type) {
case ChannelType::NvDec:
devices[fd] = std::make_unique<Tegra::Host1x::Nvdec>(*this, fd, syncpt, frame_queue);
break;
case ChannelType::VIC:
devices[fd] = std::make_unique<Tegra::Host1x::Vic>(*this, fd, syncpt, frame_queue);
break;
default:
LOG_ERROR(HW_GPU, "Unimplemented host1x device {}", static_cast<u32>(type));
break;
}
}

} // namespace Tegra
void Host1x::StopDevice(s32 fd, ChannelType type) {
devices.erase(fd);
}

} // namespace Tegra::Host1x
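StartDevice keys each CDmaPusher off its nvhost channel fd, so several decoder instances can run side by side and a VIC instance can be attached independently. A hedged sketch of the channel lifecycle from the caller's side (only StartDevice, PushEntries, and StopDevice come from the code above; the wrapper functions are illustrative):

```cpp
// Illustrative driver of the per-fd device map above. Assumptions: `host1x`
// is the Tegra::Host1x::Host1x instance, fd/syncpt come from the nvhost
// channel open path, and ChCommandHeaderList is the submission list type
// from video_core/cdma_pusher.h.
void OpenNvdecChannel(Tegra::Host1x::Host1x& host1x, s32 fd, u32 syncpt) {
    host1x.StartDevice(fd, Tegra::Host1x::ChannelType::NvDec, syncpt);
}

void Submit(Tegra::Host1x::Host1x& host1x, s32 fd, Tegra::ChCommandHeaderList&& entries) {
    // Silently dropped if the fd was never started, matching PushEntries above.
    host1x.PushEntries(fd, std::move(entries));
}

void CloseNvdecChannel(Tegra::Host1x::Host1x& host1x, s32 fd) {
    host1x.StopDevice(fd, Tegra::Host1x::ChannelType::NvDec);
}
```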
@@ -3,9 +3,14 @@

#pragma once

#include <unordered_map>
#include <unordered_set>
#include <queue>

#include "common/common_types.h"

#include "common/address_space.h"
#include "video_core/cdma_pusher.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/host1x/syncpoint_manager.h"
#include "video_core/memory_manager.h"
@@ -14,15 +19,128 @@ namespace Core {
class System;
} // namespace Core

namespace Tegra {
namespace FFmpeg {
class Frame;
} // namespace FFmpeg

namespace Host1x {
namespace Tegra::Host1x {
class Nvdec;

class FrameQueue {
public:
void Open(s32 fd) {
std::scoped_lock l{m_mutex};
m_presentation_order.insert({fd, {}});
m_decode_order.insert({fd, {}});
}

void Close(s32 fd) {
std::scoped_lock l{m_mutex};
m_presentation_order.erase(fd);
m_decode_order.erase(fd);
}

s32 VicFindNvdecFdFromOffset(u64 search_offset) {
std::scoped_lock l{m_mutex};
// Vic does not know which nvdec is producing frames for it, so search all the fds here for
// the given offset.
for (auto& map : m_presentation_order) {
for (auto& [offset, frame] : map.second) {
if (offset == search_offset) {
return map.first;
}
}
}

for (auto& map : m_decode_order) {
for (auto& [offset, frame] : map.second) {
if (offset == search_offset) {
return map.first;
}
}
}

return -1;
}

void PushPresentOrder(s32 fd, u64 offset, std::shared_ptr<FFmpeg::Frame>&& frame) {
std::scoped_lock l{m_mutex};
auto map = m_presentation_order.find(fd);
map->second.emplace_back(offset, std::move(frame));
}

void PushDecodeOrder(s32 fd, u64 offset, std::shared_ptr<FFmpeg::Frame>&& frame) {
std::scoped_lock l{m_mutex};
auto map = m_decode_order.find(fd);
map->second.insert_or_assign(offset, std::move(frame));
}

std::shared_ptr<FFmpeg::Frame> GetFrame(s32 fd, u64 offset) {
if (fd == -1) {
return {};
}

std::scoped_lock l{m_mutex};
auto present_map = m_presentation_order.find(fd);
if (present_map->second.size() > 0) {
return GetPresentOrderLocked(fd);
}

auto decode_map = m_decode_order.find(fd);
if (decode_map->second.size() > 0) {
return GetDecodeOrderLocked(fd, offset);
}

return {};
}

private:
std::shared_ptr<FFmpeg::Frame> GetPresentOrderLocked(s32 fd) {
auto map = m_presentation_order.find(fd);
if (map->second.size() == 0) {
return {};
}
auto frame = std::move(map->second.front().second);
map->second.pop_front();
return frame;
}

std::shared_ptr<FFmpeg::Frame> GetDecodeOrderLocked(s32 fd, u64 offset) {
auto map = m_decode_order.find(fd);
auto it = map->second.find(offset);
if (it == map->second.end()) {
return {};
}
return std::move(map->second.extract(it).mapped());
}

using FramePtr = std::shared_ptr<FFmpeg::Frame>;

std::mutex m_mutex{};
std::unordered_map<s32, std::deque<std::pair<u64, FramePtr>>> m_presentation_order;
std::unordered_map<s32, std::unordered_map<u64, FramePtr>> m_decode_order;
};

enum class ChannelType : u32 {
MsEnc = 0,
VIC = 1,
GPU = 2,
NvDec = 3,
Display = 4,
NvJpg = 5,
TSec = 6,
Max = 7,
};

class Host1x {
public:
explicit Host1x(Core::System& system);
~Host1x();

Core::System& System() {
return system;
}

SyncpointManager& GetSyncpointManager() {
return syncpoint_manager;
}
@@ -55,14 +173,25 @@ public:
return *allocator;
}

void StartDevice(s32 fd, ChannelType type, u32 syncpt);
void StopDevice(s32 fd, ChannelType type);

void PushEntries(s32 fd, ChCommandHeaderList&& entries) {
auto it = devices.find(fd);
if (it == devices.end()) {
return;
}
it->second->PushEntries(std::move(entries));
}

private:
Core::System& system;
SyncpointManager syncpoint_manager;
Tegra::MaxwellDeviceMemoryManager memory_manager;
Tegra::MemoryManager gmmu_manager;
std::unique_ptr<Common::FlatAllocator<u32, 0, 32>> allocator;
FrameQueue frame_queue;
std::unordered_map<s32, std::unique_ptr<CDmaPusher>> devices;
};

} // namespace Host1x

} // namespace Tegra
} // namespace Tegra::Host1x
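FrameQueue is the hand-off point between nvdec and VIC: frames are pushed either in presentation order (a per-fd deque) or in decode order (keyed by surface offset), and GetFrame prefers the presentation queue. A hedged sketch of how the two sides meet, using only the member functions declared above (the surrounding flow and function names are illustrative):

```cpp
// Illustrative producer/consumer flow over FrameQueue. Assumptions: `queue`
// is the Host1x-owned FrameQueue, `nvdec_fd` is the producing channel, and
// `luma_offset` is the surface offset that VIC later reads from.
#include <memory>
#include <utility>

void ProducerSide(Tegra::Host1x::FrameQueue& queue, s32 nvdec_fd, u64 luma_offset,
                  std::shared_ptr<FFmpeg::Frame> frame, bool decode_order) {
    if (decode_order) {
        // e.g. H.264 with frame reordering: consumers look the frame up by offset.
        queue.PushDecodeOrder(nvdec_fd, luma_offset, std::move(frame));
    } else {
        // Codecs that already emit frames in presentation order.
        queue.PushPresentOrder(nvdec_fd, luma_offset, std::move(frame));
    }
}

std::shared_ptr<FFmpeg::Frame> ConsumerSide(Tegra::Host1x::FrameQueue& queue, u64 luma_offset) {
    // VIC only knows the surface offset, so it first resolves the producing fd,
    // then pulls the matching frame.
    const s32 fd = queue.VicFindNvdecFdFromOffset(luma_offset);
    return queue.GetFrame(fd, luma_offset);
}
```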
@@ -2,6 +2,12 @@
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/assert.h"

#include "common/polyfill_thread.h"
#include "common/settings.h"
#include "video_core/host1x/codecs/h264.h"
#include "video_core/host1x/codecs/vp8.h"
#include "video_core/host1x/codecs/vp9.h"
#include "video_core/host1x/host1x.h"
#include "video_core/host1x/nvdec.h"

@@ -10,37 +16,70 @@ namespace Tegra::Host1x {
#define NVDEC_REG_INDEX(field_name) \
(offsetof(NvdecCommon::NvdecRegisters, field_name) / sizeof(u64))

Nvdec::Nvdec(Host1x& host1x_)
: host1x(host1x_), state{}, codec(std::make_unique<Codec>(host1x, state)) {}
Nvdec::Nvdec(Host1x& host1x_, s32 id_, u32 syncpt, FrameQueue& frame_queue_)
: CDmaPusher{host1x_, id_}, id{id_}, syncpoint{syncpt}, frame_queue{frame_queue_} {
LOG_INFO(HW_GPU, "Created nvdec {}", id);
frame_queue.Open(id);
}

Nvdec::~Nvdec() = default;
Nvdec::~Nvdec() {
LOG_INFO(HW_GPU, "Destroying nvdec {}", id);
frame_queue.Close(id);
}

void Nvdec::ProcessMethod(u32 method, u32 argument) {
state.reg_array[method] = static_cast<u64>(argument) << 8;
regs.reg_array[method] = argument;

switch (method) {
case NVDEC_REG_INDEX(set_codec_id):
codec->SetTargetCodec(static_cast<NvdecCommon::VideoCodec>(argument));
CreateDecoder(static_cast<NvdecCommon::VideoCodec>(argument));
break;
case NVDEC_REG_INDEX(execute):
case NVDEC_REG_INDEX(execute): {
if (wait_needed) {
std::this_thread::sleep_for(std::chrono::milliseconds(32));
wait_needed = false;
}
Execute();
break;
} break;
}
}

std::unique_ptr<FFmpeg::Frame> Nvdec::GetFrame() {
return codec->GetCurrentFrame();
void Nvdec::CreateDecoder(NvdecCommon::VideoCodec codec) {
if (decoder.get()) {
return;
}
switch (codec) {
case NvdecCommon::VideoCodec::H264:
decoder = std::make_unique<Decoders::H264>(host1x, regs, id, frame_queue);
break;
case NvdecCommon::VideoCodec::VP8:
decoder = std::make_unique<Decoders::VP8>(host1x, regs, id, frame_queue);
break;
case NvdecCommon::VideoCodec::VP9:
decoder = std::make_unique<Decoders::VP9>(host1x, regs, id, frame_queue);
break;
default:
UNIMPLEMENTED_MSG("Codec {}", decoder->GetCurrentCodecName());
break;
}
LOG_INFO(HW_GPU, "Created decoder {} for id {}", decoder->GetCurrentCodecName(), id);
}

void Nvdec::Execute() {
switch (codec->GetCurrentCodec()) {
if (Settings::values.nvdec_emulation.GetValue() == Settings::NvdecEmulation::Off) [[unlikely]] {
// Signalling syncpts too fast can cause games to get stuck as they don't expect a <1ms
// execution time. Sleep for half of a 60 fps frame just in case.
std::this_thread::sleep_for(std::chrono::milliseconds(8));
return;
}
switch (decoder->GetCurrentCodec()) {
case NvdecCommon::VideoCodec::H264:
case NvdecCommon::VideoCodec::VP8:
case NvdecCommon::VideoCodec::VP9:
codec->Decode();
decoder->Decode();
break;
default:
UNIMPLEMENTED_MSG("Codec {}", codec->GetCurrentCodecName());
UNIMPLEMENTED_MSG("Codec {}", decoder->GetCurrentCodecName());
break;
}
}
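ProcessMethod now stores the raw 32-bit register writes and reacts to two of them: set_codec_id lazily creates the decoder, and execute kicks a decode (optionally after the requested wait). NVDEC_REG_INDEX simply maps a register field to its 64-bit word index; a small worked check of that arithmetic against the byte offsets documented in nvdec_common.h later in this diff:

```cpp
// Worked example of the NVDEC_REG_INDEX arithmetic: a register documented at
// byte offset 0x0820 (h264_slice_data_offsets) lands at word index
// 0x0820 / sizeof(u64) = 0x104, matching ASSERT_REG_POSITION(h264_slice_data_offsets, 0x104).
#include <cstdint>

static_assert(0x0820 / sizeof(std::uint64_t) == 0x104);
static_assert(0x0848 / sizeof(std::uint64_t) == 0x109); // frame_stats_offset
```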
@@ -5,33 +5,47 @@

#include <memory>
#include <vector>

#include "common/common_types.h"
#include "video_core/host1x/codecs/codec.h"
#include "video_core/cdma_pusher.h"
#include "video_core/host1x/codecs/decoder.h"

namespace Tegra {

namespace Host1x {

class Host1x;
class FrameQueue;

class Nvdec {
class Nvdec final : public CDmaPusher {
public:
explicit Nvdec(Host1x& host1x);
explicit Nvdec(Host1x& host1x, s32 id, u32 syncpt, FrameQueue& frame_queue_);
~Nvdec();

/// Writes the method into the state, Invoke Execute() if encountered
void ProcessMethod(u32 method, u32 argument);
void ProcessMethod(u32 method, u32 arg) override;

/// Return most recently decoded frame
[[nodiscard]] std::unique_ptr<FFmpeg::Frame> GetFrame();
u32 GetSyncpoint() const {
return syncpoint;
}

void SetWait() {
wait_needed = true;
}

private:
/// Create the decoder when the codec id is set
void CreateDecoder(NvdecCommon::VideoCodec codec);

/// Invoke codec to decode a frame
void Execute();

Host1x& host1x;
NvdecCommon::NvdecRegisters state;
std::unique_ptr<Codec> codec;
s32 id;
u32 syncpoint;
FrameQueue& frame_queue;

NvdecCommon::NvdecRegisters regs{};
std::unique_ptr<Decoder> decoder;
bool wait_needed{false};
};

} // namespace Host1x
@@ -17,6 +17,17 @@ enum class VideoCodec : u64 {
VP9 = 0x9,
};

struct Offset {
constexpr u64 Address() const noexcept {
return offset << 8;
}

private:
u64 offset;
};
static_assert(std::is_trivial_v<Offset>, "Offset must be trivial");
static_assert(sizeof(Offset) == 0x8, "Offset has the wrong size!");

// NVDEC should use a 32-bit address space, but is mapped to 64-bit,
// doubling the sizes here is compensating for that.
struct NvdecRegisters {
@@ -38,29 +49,40 @@ struct NvdecRegisters {
BitField<17, 1, u64> all_intra_frame;
};
} control_params;
u64 picture_info_offset; ///< 0x0808
u64 frame_bitstream_offset; ///< 0x0810
u64 frame_number; ///< 0x0818
u64 h264_slice_data_offsets; ///< 0x0820
u64 h264_mv_dump_offset; ///< 0x0828
INSERT_PADDING_WORDS_NOINIT(6); ///< 0x0830
u64 frame_stats_offset; ///< 0x0848
u64 h264_last_surface_luma_offset; ///< 0x0850
u64 h264_last_surface_chroma_offset; ///< 0x0858
std::array<u64, 17> surface_luma_offset; ///< 0x0860
std::array<u64, 17> surface_chroma_offset; ///< 0x08E8
INSERT_PADDING_WORDS_NOINIT(68); ///< 0x0970
u64 vp8_prob_data_offset; ///< 0x0A80
u64 vp8_header_partition_buf_offset; ///< 0x0A88
INSERT_PADDING_WORDS_NOINIT(60); ///< 0x0A90
u64 vp9_entropy_probs_offset; ///< 0x0B80
u64 vp9_backward_updates_offset; ///< 0x0B88
u64 vp9_last_frame_segmap_offset; ///< 0x0B90
u64 vp9_curr_frame_segmap_offset; ///< 0x0B98
INSERT_PADDING_WORDS_NOINIT(2); ///< 0x0BA0
u64 vp9_last_frame_mvs_offset; ///< 0x0BA8
u64 vp9_curr_frame_mvs_offset; ///< 0x0BB0
INSERT_PADDING_WORDS_NOINIT(2); ///< 0x0BB8
Offset picture_info_offset; ///< 0x0808
Offset frame_bitstream_offset; ///< 0x0810
u64 frame_number; ///< 0x0818
Offset h264_slice_data_offsets; ///< 0x0820
Offset h264_mv_dump_offset; ///< 0x0828
INSERT_PADDING_WORDS_NOINIT(6); ///< 0x0830
Offset frame_stats_offset; ///< 0x0848
Offset h264_last_surface_luma_offset; ///< 0x0850
Offset h264_last_surface_chroma_offset; ///< 0x0858
std::array<Offset, 17> surface_luma_offsets; ///< 0x0860
std::array<Offset, 17> surface_chroma_offsets; ///< 0x08E8
Offset pic_scratch_buf_offset; ///< 0x0970
Offset external_mvbuffer_offset; ///< 0x0978
INSERT_PADDING_WORDS_NOINIT(32); ///< 0x0980
Offset h264_mbhist_buffer_offset; ///< 0x0A00
INSERT_PADDING_WORDS_NOINIT(30); ///< 0x0A08
Offset vp8_prob_data_offset; ///< 0x0A80
Offset vp8_header_partition_buf_offset; ///< 0x0A88
INSERT_PADDING_WORDS_NOINIT(28); ///< 0x0A90
Offset hvec_scalist_list_offset; ///< 0x0B00
Offset hvec_tile_sizes_offset; ///< 0x0B08
Offset hvec_filter_buffer_offset; ///< 0x0B10
Offset hvec_sao_buffer_offset; ///< 0x0B18
Offset hvec_slice_info_buffer_offset; ///< 0x0B20
Offset hvec_slice_group_index_offset; ///< 0x0B28
INSERT_PADDING_WORDS_NOINIT(20); ///< 0x0B30
Offset vp9_prob_tab_buffer_offset; ///< 0x0B80
Offset vp9_ctx_counter_buffer_offset; ///< 0x0B88
Offset vp9_segment_read_buffer_offset; ///< 0x0B90
Offset vp9_segment_write_buffer_offset; ///< 0x0B98
Offset vp9_tile_size_buffer_offset; ///< 0x0BA0
Offset vp9_col_mvwrite_buffer_offset; ///< 0x0BA8
Offset vp9_col_mvread_buffer_offset; ///< 0x0BB0
Offset vp9_filter_buffer_offset; ///< 0x0BB8
};
std::array<u64, NUM_REGS> reg_array;
};
@@ -81,16 +103,16 @@ ASSERT_REG_POSITION(h264_slice_data_offsets, 0x104);
ASSERT_REG_POSITION(frame_stats_offset, 0x109);
ASSERT_REG_POSITION(h264_last_surface_luma_offset, 0x10A);
ASSERT_REG_POSITION(h264_last_surface_chroma_offset, 0x10B);
ASSERT_REG_POSITION(surface_luma_offset, 0x10C);
ASSERT_REG_POSITION(surface_chroma_offset, 0x11D);
ASSERT_REG_POSITION(surface_luma_offsets, 0x10C);
ASSERT_REG_POSITION(surface_chroma_offsets, 0x11D);
ASSERT_REG_POSITION(vp8_prob_data_offset, 0x150);
ASSERT_REG_POSITION(vp8_header_partition_buf_offset, 0x151);
ASSERT_REG_POSITION(vp9_entropy_probs_offset, 0x170);
ASSERT_REG_POSITION(vp9_backward_updates_offset, 0x171);
ASSERT_REG_POSITION(vp9_last_frame_segmap_offset, 0x172);
ASSERT_REG_POSITION(vp9_curr_frame_segmap_offset, 0x173);
ASSERT_REG_POSITION(vp9_last_frame_mvs_offset, 0x175);
ASSERT_REG_POSITION(vp9_curr_frame_mvs_offset, 0x176);
ASSERT_REG_POSITION(vp9_prob_tab_buffer_offset, 0x170);
ASSERT_REG_POSITION(vp9_ctx_counter_buffer_offset, 0x171);
ASSERT_REG_POSITION(vp9_segment_read_buffer_offset, 0x172);
ASSERT_REG_POSITION(vp9_segment_write_buffer_offset, 0x173);
ASSERT_REG_POSITION(vp9_col_mvwrite_buffer_offset, 0x175);
ASSERT_REG_POSITION(vp9_col_mvread_buffer_offset, 0x176);

#undef ASSERT_REG_POSITION
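The Offset wrapper replaces the old write-time shift in ProcessMethod (`state.reg_array[method] = argument << 8`) with a read-time one: registers store addresses in 256-byte units and Address() expands them to byte addresses. A small worked check of that conversion (the register value is illustrative):

```cpp
// Illustrative check of the Offset::Address() shift: a stored register value
// of 0x123456 (256-byte units) corresponds to byte address 0x12345600.
#include <cstdint>

constexpr std::uint64_t RegisterValueToAddress(std::uint64_t reg_value) {
    return reg_value << 8; // the same shift Offset::Address() applies
}

static_assert(RegisterValueToAddress(0x123456) == 0x12345600);
```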
@@ -1,50 +0,0 @@
// SPDX-FileCopyrightText: Ryujinx Team and Contributors
// SPDX-License-Identifier: MIT

#include <algorithm>
#include "sync_manager.h"
#include "video_core/host1x/host1x.h"
#include "video_core/host1x/syncpoint_manager.h"

namespace Tegra {
namespace Host1x {

SyncptIncrManager::SyncptIncrManager(Host1x& host1x_) : host1x(host1x_) {}
SyncptIncrManager::~SyncptIncrManager() = default;

void SyncptIncrManager::Increment(u32 id) {
increments.emplace_back(0, 0, id, true);
IncrementAllDone();
}

u32 SyncptIncrManager::IncrementWhenDone(u32 class_id, u32 id) {
const u32 handle = current_id++;
increments.emplace_back(handle, class_id, id);
return handle;
}

void SyncptIncrManager::SignalDone(u32 handle) {
const auto done_incr =
std::find_if(increments.begin(), increments.end(),
[handle](const SyncptIncr& incr) { return incr.id == handle; });
if (done_incr != increments.cend()) {
done_incr->complete = true;
}
IncrementAllDone();
}

void SyncptIncrManager::IncrementAllDone() {
std::size_t done_count = 0;
for (; done_count < increments.size(); ++done_count) {
if (!increments[done_count].complete) {
break;
}
auto& syncpoint_manager = host1x.GetSyncpointManager();
syncpoint_manager.IncrementGuest(increments[done_count].syncpt_id);
syncpoint_manager.IncrementHost(increments[done_count].syncpt_id);
}
increments.erase(increments.begin(), increments.begin() + done_count);
}

} // namespace Host1x
} // namespace Tegra
Some files were not shown because too many files have changed in this diff.