Mirror of https://github.com/yuzu-emu/yuzu-android (synced 2024-12-23 12:21:21 -08:00)

core: hle: kernel: Rename Thread to KThread.

Parent: df41e78205
Commit: c0d3aef28c
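The commit is a mechanical rename of the guest-thread class. Below is a minimal sketch of the before/after pattern, drawn from the hunks that follow; the `Example::Sample` wrapper is hypothetical and only gives the lines a compilable home inside the yuzu source tree, while the include path and `GetCurrentThread()` call come from the diff itself.

```cpp
// The header moves from core/hle/kernel/thread.h to core/hle/kernel/k_thread.h.
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"

namespace Example {

// Hypothetical helper showing the renamed type in use.
void Sample(Kernel::KScheduler& scheduler) {
    // Before this commit: Kernel::Thread* current = scheduler.GetCurrentThread();
    Kernel::KThread* current = scheduler.GetCurrentThread();
    (void)current;
}

} // namespace Example
```

Only the type name and header path change; call sites and behavior are otherwise left as they were, as the hunks below show.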
@@ -168,6 +168,8 @@ add_library(core STATIC
hle/kernel/k_scoped_scheduler_lock_and_sleep.h
hle/kernel/k_synchronization_object.cpp
hle/kernel/k_synchronization_object.h
+hle/kernel/k_thread.cpp
+hle/kernel/k_thread.h
hle/kernel/kernel.cpp
hle/kernel/kernel.h
hle/kernel/memory/address_space_info.cpp
@@ -216,8 +218,6 @@ add_library(core STATIC
hle/kernel/svc_results.h
hle/kernel/svc_types.h
hle/kernel/svc_wrap.h
-hle/kernel/thread.cpp
-hle/kernel/thread.h
hle/kernel/time_manager.cpp
hle/kernel/time_manager.h
hle/kernel/transfer_memory.cpp
@@ -28,10 +28,10 @@
#include "core/hardware_interrupt_manager.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/service/am/applets/applets.h"
#include "core/hle/service/apm/controller.h"
#include "core/hle/service/filesystem/filesystem.h"
@@ -11,9 +11,9 @@
#include "core/core_timing.h"
#include "core/cpu_manager.h"
#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/thread.h"
#include "video_core/gpu.h"

namespace Core {
@@ -147,7 +147,7 @@ void CpuManager::MultiCoreRunSuspendThread() {
while (true) {
auto core = kernel.GetCurrentHostThreadID();
auto& scheduler = *kernel.CurrentScheduler();
-Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+Kernel::KThread* current_thread = scheduler.GetCurrentThread();
Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.GetCurrentHostThreadID());
@@ -245,7 +245,7 @@ void CpuManager::SingleCoreRunSuspendThread() {
while (true) {
auto core = kernel.GetCurrentHostThreadID();
auto& scheduler = *kernel.CurrentScheduler();
-Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+Kernel::KThread* current_thread = scheduler.GetCurrentThread();
Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.GetCurrentHostThreadID());
@@ -256,7 +256,7 @@ void CpuManager::SingleCoreRunSuspendThread() {
void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
{
auto& scheduler = system.Kernel().Scheduler(current_core);
-Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+Kernel::KThread* current_thread = scheduler.GetCurrentThread();
if (idle_count >= 4 || from_running_enviroment) {
if (!from_running_enviroment) {
system.CoreTiming().Idle();
@@ -5,9 +5,9 @@
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"

namespace Kernel {
@@ -38,7 +38,7 @@ ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kern
return MakeResult(std::move(client_session));
}

-ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread,
+ResultCode ClientSession::SendSyncRequest(std::shared_ptr<KThread> thread,
Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing) {
// Keep ServerSession alive until we're done working with it.
@@ -24,7 +24,7 @@ namespace Kernel {

class KernelCore;
class Session;
-class Thread;
+class KThread;

class ClientSession final : public KSynchronizationObject {
public:
@@ -46,7 +46,7 @@ public:
return HANDLE_TYPE;
}

-ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
+ResultCode SendSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing);

bool IsSignaled() const override;
@@ -17,12 +17,12 @@ GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)

GlobalSchedulerContext::~GlobalSchedulerContext() = default;

-void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
+void GlobalSchedulerContext::AddThread(std::shared_ptr<KThread> thread) {
std::scoped_lock lock{global_list_guard};
thread_list.push_back(std::move(thread));
}

-void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
+void GlobalSchedulerContext::RemoveThread(std::shared_ptr<KThread> thread) {
std::scoped_lock lock{global_list_guard};
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
thread_list.end());
@@ -12,7 +12,7 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_priority_queue.h"
#include "core/hle/kernel/k_scheduler_lock.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/k_thread.h"

namespace Kernel {

@@ -20,7 +20,7 @@ class KernelCore;
class SchedulerLock;

using KSchedulerPriorityQueue =
-KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
+KPriorityQueue<KThread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
constexpr s32 HighestCoreMigrationAllowedPriority = 2;

class GlobalSchedulerContext final {
@@ -33,13 +33,13 @@ public:
~GlobalSchedulerContext();

/// Adds a new thread to the scheduler
-void AddThread(std::shared_ptr<Thread> thread);
+void AddThread(std::shared_ptr<KThread> thread);

/// Removes a thread from the scheduler
-void RemoveThread(std::shared_ptr<Thread> thread);
+void RemoveThread(std::shared_ptr<KThread> thread);

/// Returns a list of all threads managed by the scheduler
-[[nodiscard]] const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
+[[nodiscard]] const std::vector<std::shared_ptr<KThread>>& GetThreadList() const {
return thread_list;
}

@@ -74,7 +74,7 @@ private:
LockType scheduler_lock;

/// Lists all thread ids that aren't deleted/etc.
-std::vector<std::shared_ptr<Thread>> thread_list;
+std::vector<std::shared_ptr<KThread>> thread_list;
Common::SpinLock global_list_guard{};
};

@@ -9,9 +9,9 @@
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/thread.h"

namespace Kernel {
namespace {
@@ -19,12 +19,12 @@
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/server_session.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/kernel/writable_event.h"
#include "core/memory.h"
@@ -48,7 +48,7 @@ void SessionRequestHandler::ClientDisconnected(

HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
std::shared_ptr<ServerSession> server_session,
-std::shared_ptr<Thread> thread)
+std::shared_ptr<KThread> thread)
: server_session(std::move(server_session)),
thread(std::move(thread)), kernel{kernel}, memory{memory} {
cmd_buf[0] = 0;
@@ -182,7 +182,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const HandleTabl
return RESULT_SUCCESS;
}

-ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
+ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& thread) {
auto& owner_process = *thread.GetOwnerProcess();
auto& handle_table = owner_process.GetHandleTable();

@@ -40,7 +40,7 @@ class HLERequestContext;
class KernelCore;
class Process;
class ServerSession;
-class Thread;
+class KThread;
class ReadableEvent;
class WritableEvent;

@@ -110,7 +110,7 @@ class HLERequestContext {
public:
explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
std::shared_ptr<ServerSession> session,
-std::shared_ptr<Thread> thread);
+std::shared_ptr<KThread> thread);
~HLERequestContext();

/// Returns a pointer to the IPC command buffer for this request.
@@ -127,14 +127,14 @@ public:
}

using WakeupCallback = std::function<void(
-std::shared_ptr<Thread> thread, HLERequestContext& context, ThreadWakeupReason reason)>;
+std::shared_ptr<KThread> thread, HLERequestContext& context, ThreadWakeupReason reason)>;

/// Populates this context with data from the requesting process/thread.
ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
u32_le* src_cmdbuf);

/// Writes data from this context back to the requesting process/thread.
-ResultCode WriteToOutgoingCommandBuffer(Thread& thread);
+ResultCode WriteToOutgoingCommandBuffer(KThread& thread);

u32_le GetCommand() const {
return command;
@@ -267,11 +267,11 @@ public:

std::string Description() const;

-Thread& GetThread() {
+KThread& GetThread() {
return *thread;
}

-const Thread& GetThread() const {
+const KThread& GetThread() const {
return *thread;
}

@@ -286,7 +286,7 @@ private:

std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
std::shared_ptr<Kernel::ServerSession> server_session;
-std::shared_ptr<Thread> thread;
+std::shared_ptr<KThread> thread;
// TODO(yuriks): Check common usage of this and optimize size accordingly
boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects;
boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects;
@@ -7,9 +7,9 @@
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/memory.h"

@@ -96,7 +96,7 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
auto it = thread_tree.nfind_light({addr, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
-Thread* target_thread = std::addressof(*it);
+KThread* target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);

ASSERT(target_thread->IsWaitingForAddressArbiter());
@@ -125,7 +125,7 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
auto it = thread_tree.nfind_light({addr, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
-Thread* target_thread = std::addressof(*it);
+KThread* target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);

ASSERT(target_thread->IsWaitingForAddressArbiter());
@@ -215,7 +215,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32

while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
-Thread* target_thread = std::addressof(*it);
+KThread* target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);

ASSERT(target_thread->IsWaitingForAddressArbiter());
@@ -231,7 +231,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32

ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
// Prepare to wait.
-Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
Handle timer = InvalidHandle;

{
@@ -302,7 +302,7 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement

ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
// Prepare to wait.
-Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
Handle timer = InvalidHandle;

{
@@ -10,11 +10,11 @@
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/svc_results.h"
-#include "core/hle/kernel/thread.h"
#include "core/memory.h"

namespace Kernel {
@@ -66,7 +66,7 @@ KConditionVariable::KConditionVariable(Core::System& system_)
KConditionVariable::~KConditionVariable() = default;

ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
-Thread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
+KThread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();

// Signal the address.
{
@@ -74,7 +74,7 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {

// Remove waiter thread.
s32 num_waiters{};
-Thread* next_owner_thread =
+KThread* next_owner_thread =
owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);

// Determine the next tag.
@@ -103,11 +103,11 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
}

ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
-Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();

// Wait for the address.
{
-std::shared_ptr<Thread> owner_thread;
+std::shared_ptr<KThread> owner_thread;
ASSERT(!owner_thread);
{
KScopedSchedulerLock sl(kernel);
@@ -126,7 +126,7 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);

// Get the lock owner thread.
-owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<Thread>(handle);
+owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(handle);
R_UNLESS(owner_thread, Svc::ResultInvalidHandle);

// Update the lock.
@@ -143,7 +143,7 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
// Remove the thread as a waiter from the lock owner.
{
KScopedSchedulerLock sl(kernel);
-Thread* owner_thread = cur_thread->GetLockOwner();
+KThread* owner_thread = cur_thread->GetLockOwner();
if (owner_thread != nullptr) {
owner_thread->RemoveWaiter(cur_thread);
}
@@ -154,7 +154,7 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
return cur_thread->GetWaitResult(std::addressof(dummy));
}

-Thread* KConditionVariable::SignalImpl(Thread* thread) {
+KThread* KConditionVariable::SignalImpl(KThread* thread) {
// Check pre-conditions.
ASSERT(kernel.GlobalSchedulerContext().IsLocked());

@@ -174,7 +174,7 @@ Thread* KConditionVariable::SignalImpl(Thread* thread) {
}
}

-Thread* thread_to_close = nullptr;
+KThread* thread_to_close = nullptr;
if (can_access) {
if (prev_tag == InvalidHandle) {
// If nobody held the lock previously, we're all good.
@@ -182,7 +182,7 @@ Thread* KConditionVariable::SignalImpl(Thread* thread) {
thread->Wakeup();
} else {
// Get the previous owner.
-auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<Thread>(
+auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(
prev_tag & ~Svc::HandleWaitMask);

if (owner_thread) {
@@ -210,8 +210,8 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {

// TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using
// std::shared_ptr.
-std::vector<std::shared_ptr<Thread>> thread_list;
-std::array<Thread*, MaxThreads> thread_array;
+std::vector<std::shared_ptr<KThread>> thread_list;
+std::array<KThread*, MaxThreads> thread_array;
s32 num_to_close{};

// Perform signaling.
@@ -222,9 +222,9 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
auto it = thread_tree.nfind_light({cv_key, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetConditionVariableKey() == cv_key)) {
-Thread* target_thread = std::addressof(*it);
+KThread* target_thread = std::addressof(*it);

-if (Thread* thread = SignalImpl(target_thread); thread != nullptr) {
+if (KThread* thread = SignalImpl(target_thread); thread != nullptr) {
if (num_to_close < MaxThreads) {
thread_array[num_to_close++] = thread;
} else {
@@ -257,7 +257,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {

ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
// Prepare to wait.
-Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
Handle timer = InvalidHandle;

{
@@ -276,7 +276,7 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
{
// Remove waiter thread.
s32 num_waiters{};
-Thread* next_owner_thread =
+KThread* next_owner_thread =
cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);

// Update for the next owner thread.
@@ -331,7 +331,7 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
{
KScopedSchedulerLock sl(kernel);

-if (Thread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
+if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
owner->RemoveWaiter(cur_thread);
}

@@ -8,8 +8,8 @@
#include "common/common_types.h"

#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"

namespace Core {
@@ -20,7 +20,7 @@ namespace Kernel {

class KConditionVariable {
public:
-using ThreadTree = typename Thread::ConditionVariableThreadTreeType;
+using ThreadTree = typename KThread::ConditionVariableThreadTreeType;

explicit KConditionVariable(Core::System& system_);
~KConditionVariable();
@@ -34,7 +34,7 @@ public:
[[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);

private:
-[[nodiscard]] Thread* SignalImpl(Thread* thread);
+[[nodiscard]] KThread* SignalImpl(KThread* thread);

ThreadTree thread_tree;

@@ -43,14 +43,14 @@ private:
};

inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
-Thread* thread) {
+KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());

tree->erase(tree->iterator_to(*thread));
}

inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
-Thread* thread) {
+KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());

tree->insert(*thread);
@@ -18,7 +18,7 @@

namespace Kernel {

-class Thread;
+class KThread;

template <typename T>
concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T & t) {
@@ -367,7 +367,7 @@ public:
this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
}

-constexpr Thread* MoveToScheduledBack(Member* member) {
+constexpr KThread* MoveToScheduledBack(Member* member) {
return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
member);
}
@@ -17,15 +17,15 @@
#include "core/cpu_manager.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"

namespace Kernel {

-static void IncrementScheduledCount(Kernel::Thread* thread) {
+static void IncrementScheduledCount(Kernel::KThread* thread) {
if (auto process = thread->GetOwnerProcess(); process) {
process->IncrementScheduledCount();
}
@@ -56,9 +56,9 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
}
}

-u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
+u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
std::scoped_lock lock{guard};
-if (Thread* prev_highest_thread = this->state.highest_priority_thread;
+if (KThread* prev_highest_thread = this->state.highest_priority_thread;
prev_highest_thread != highest_thread) {
if (prev_highest_thread != nullptr) {
IncrementScheduledCount(prev_highest_thread);
@@ -90,13 +90,13 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
ClearSchedulerUpdateNeeded(kernel);

u64 cores_needing_scheduling = 0, idle_cores = 0;
-Thread* top_threads[Core::Hardware::NUM_CPU_CORES];
+KThread* top_threads[Core::Hardware::NUM_CPU_CORES];
auto& priority_queue = GetPriorityQueue(kernel);

/// We want to go over all cores, finding the highest priority thread and determining if
/// scheduling is needed for that core.
for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-Thread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
+KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
if (top_thread != nullptr) {
// If the thread has no waiters, we need to check if the process has a thread pinned.
// TODO(bunnei): Implement thread pinning
@@ -112,7 +112,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
while (idle_cores != 0) {
const auto core_id = static_cast<u32>(std::countr_zero(idle_cores));
-if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
+if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
size_t num_candidates = 0;

@@ -120,7 +120,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
while (suggested != nullptr) {
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
-if (Thread* top_thread =
+if (KThread* top_thread =
(suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
top_thread != suggested) {
// Make sure we're not dealing with threads too high priority for migration.
@@ -152,7 +152,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// Check if there's some other thread that can run on the candidate core.
const s32 candidate_core = migration_candidates[i];
suggested = top_threads[candidate_core];
-if (Thread* next_on_candidate_core =
+if (KThread* next_on_candidate_core =
priority_queue.GetScheduledNext(candidate_core, suggested);
next_on_candidate_core != nullptr) {
// The candidate core can run some other thread! We'll migrate its current
@@ -182,7 +182,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
return cores_needing_scheduling;
}

-void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state) {
+void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());

// Check if the state has changed, because if it hasn't there's nothing to do.
@@ -205,7 +205,7 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, Thread
}
}

-void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32 old_priority) {
+void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());

// If the thread is runnable, we want to change its priority in the queue.
@@ -217,7 +217,7 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32
}
}

-void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
+void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
const KAffinityMask& old_affinity, s32 old_core) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());

@@ -237,8 +237,8 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
auto& priority_queue = GetPriorityQueue(kernel);

// Rotate the front of the queue to the end.
-Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
-Thread* next_thread = nullptr;
+KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
+KThread* next_thread = nullptr;
if (top_thread != nullptr) {
next_thread = priority_queue.MoveToScheduledBack(top_thread);
if (next_thread != top_thread) {
@@ -249,11 +249,11 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {

// While we have a suggested thread, try to migrate it!
{
-Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
+KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
while (suggested != nullptr) {
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
-if (Thread* top_on_suggested_core =
+if (KThread* top_on_suggested_core =
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
: nullptr;
top_on_suggested_core != suggested) {
@@ -285,7 +285,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
// Now that we might have migrated a thread with the same priority, check if we can do better.

{
-Thread* best_thread = priority_queue.GetScheduledFront(core_id);
+KThread* best_thread = priority_queue.GetScheduledFront(core_id);
if (best_thread == GetCurrentThread()) {
best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
}
@@ -293,7 +293,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
// If the best thread we can choose has a priority the same or worse than ours, try to
// migrate a higher priority thread.
if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
-Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+KThread* suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
// If the suggestion's priority is the same as ours, don't bother.
if (suggested->GetPriority() >= best_thread->GetPriority()) {
@@ -302,7 +302,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {

// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
-if (Thread* top_on_suggested_core =
+if (KThread* top_on_suggested_core =
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
: nullptr;
top_on_suggested_core != suggested) {
@@ -380,7 +380,7 @@ void KScheduler::YieldWithoutCoreMigration() {
ASSERT(kernel.CurrentProcess() != nullptr);

// Get the current thread and process.
-Thread& cur_thread = *GetCurrentThread();
+KThread& cur_thread = *GetCurrentThread();
Process& cur_process = *kernel.CurrentProcess();

// If the thread's yield count matches, there's nothing for us to do.
@@ -398,7 +398,7 @@ void KScheduler::YieldWithoutCoreMigration() {
const auto cur_state = cur_thread.GetRawState();
if (cur_state == ThreadState::Runnable) {
// Put the current thread at the back of the queue.
-Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
+KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
IncrementScheduledCount(std::addressof(cur_thread));

// If the next thread is different, we have an update to perform.
@@ -421,7 +421,7 @@ void KScheduler::YieldWithCoreMigration() {
ASSERT(kernel.CurrentProcess() != nullptr);

// Get the current thread and process.
-Thread& cur_thread = *GetCurrentThread();
+KThread& cur_thread = *GetCurrentThread();
Process& cur_process = *kernel.CurrentProcess();

// If the thread's yield count matches, there's nothing for us to do.
@@ -442,17 +442,17 @@ void KScheduler::YieldWithCoreMigration() {
const s32 core_id = cur_thread.GetActiveCore();

// Put the current thread at the back of the queue.
-Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
+KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
IncrementScheduledCount(std::addressof(cur_thread));

// While we have a suggested thread, try to migrate it!
bool recheck = false;
-Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+KThread* suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
// Check if the suggested thread is the thread running on its core.
const s32 suggested_core = suggested->GetActiveCore();

-if (Thread* running_on_suggested_core =
+if (KThread* running_on_suggested_core =
(suggested_core >= 0)
? kernel.Scheduler(suggested_core).state.highest_priority_thread
: nullptr;
@@ -511,7 +511,7 @@ void KScheduler::YieldToAnyThread() {
ASSERT(kernel.CurrentProcess() != nullptr);

// Get the current thread and process.
-Thread& cur_thread = *GetCurrentThread();
+KThread& cur_thread = *GetCurrentThread();
Process& cur_process = *kernel.CurrentProcess();

// If the thread's yield count matches, there's nothing for us to do.
@@ -539,11 +539,11 @@ void KScheduler::YieldToAnyThread() {
// If there's nothing scheduled, we can try to perform a migration.
if (priority_queue.GetScheduledFront(core_id) == nullptr) {
// While we have a suggested thread, try to migrate it!
-Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+KThread* suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
-if (Thread* top_on_suggested_core =
+if (KThread* top_on_suggested_core =
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
: nullptr;
top_on_suggested_core != suggested) {
@@ -594,7 +594,7 @@ KScheduler::KScheduler(Core::System& system, std::size_t core_id)

KScheduler::~KScheduler() = default;

-Thread* KScheduler::GetCurrentThread() const {
+KThread* KScheduler::GetCurrentThread() const {
if (current_thread) {
return current_thread;
}
@@ -624,7 +624,7 @@ void KScheduler::OnThreadStart() {
SwitchContextStep2();
}

-void KScheduler::Unload(Thread* thread) {
+void KScheduler::Unload(KThread* thread) {
if (thread) {
thread->SetIsRunning(false);
if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
@@ -643,7 +643,7 @@ void KScheduler::Unload(Thread* thread) {
}
}

-void KScheduler::Reload(Thread* thread) {
+void KScheduler::Reload(KThread* thread) {
if (thread) {
ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");

@@ -674,7 +674,7 @@ void KScheduler::SwitchContextStep2() {
}

void KScheduler::ScheduleImpl() {
-Thread* previous_thread = current_thread;
+KThread* previous_thread = current_thread;
current_thread = state.highest_priority_thread;

this->state.needs_scheduling = false;
@@ -744,7 +744,7 @@ void KScheduler::SwitchToCurrent() {
}
}

-void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
+void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process) {
const u64 prev_switch_ticks = last_context_switch_time;
const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
@@ -765,8 +765,8 @@ void KScheduler::Initialize() {
std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
-auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
-nullptr, std::move(init_func), init_func_parameter);
+auto thread_res = KThread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
+nullptr, std::move(init_func), init_func_parameter);
idle_thread = thread_res.Unwrap().get();

{
@@ -29,7 +29,7 @@ namespace Kernel {
class KernelCore;
class Process;
class SchedulerLock;
-class Thread;
+class KThread;

class KScheduler final {
public:
@@ -45,13 +45,13 @@ public:

/// The next two are for SingleCore Only.
/// Unload current thread before preempting core.
-void Unload(Thread* thread);
+void Unload(KThread* thread);

/// Reload current thread after core preemption.
-void Reload(Thread* thread);
+void Reload(KThread* thread);

/// Gets the current running thread
-[[nodiscard]] Thread* GetCurrentThread() const;
+[[nodiscard]] KThread* GetCurrentThread() const;

/// Gets the timestamp for the last context switch in ticks.
[[nodiscard]] u64 GetLastContextSwitchTicks() const;
@@ -72,7 +72,7 @@ public:
return switch_fiber;
}

-[[nodiscard]] u64 UpdateHighestPriorityThread(Thread* highest_thread);
+[[nodiscard]] u64 UpdateHighestPriorityThread(KThread* highest_thread);

/**
* Takes a thread and moves it to the back of the it's priority list.
@@ -100,13 +100,13 @@ public:
void YieldToAnyThread();

/// Notify the scheduler a thread's status has changed.
-static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state);
+static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state);

/// Notify the scheduler a thread's priority has changed.
-static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32 old_priority);
+static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority);

/// Notify the scheduler a thread's core and/or affinity mask has changed.
-static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
+static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
const KAffinityMask& old_affinity, s32 old_core);

static bool CanSchedule(KernelCore& kernel);
@@ -163,13 +163,13 @@ private:
* most recent tick count retrieved. No special arithmetic is
* applied to it.
*/
-void UpdateLastContextSwitchTime(Thread* thread, Process* process);
+void UpdateLastContextSwitchTime(KThread* thread, Process* process);

static void OnSwitch(void* this_scheduler);
void SwitchToCurrent();

-Thread* current_thread{};
-Thread* idle_thread{};
+KThread* current_thread{};
+KThread* idle_thread{};

std::shared_ptr<Common::Fiber> switch_fiber{};

@@ -178,7 +178,7 @@ private:
bool interrupt_task_thread_runnable{};
bool should_count_idle{};
u64 idle_count{};
-Thread* highest_priority_thread{};
+KThread* highest_priority_thread{};
void* idle_thread_stack{};
};

@@ -9,15 +9,15 @@

#include "common/common_types.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"

namespace Kernel {

class KScopedSchedulerLockAndSleep {
public:
-explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t,
+explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, KThread* t,
s64 timeout)
: kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
event_handle = InvalidHandle;
@@ -43,7 +43,7 @@ public:
private:
KernelCore& kernel;
Handle& event_handle;
-Thread* thread{};
+KThread* thread{};
s64 timeout_tick{};
};

@@ -7,9 +7,9 @@
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
-#include "core/hle/kernel/thread.h"

namespace Kernel {

@@ -20,7 +20,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
std::vector<ThreadListNode> thread_nodes(num_objects);

// Prepare for wait.
-Thread* thread = kernel.CurrentScheduler()->GetCurrentThread();
+KThread* thread = kernel.CurrentScheduler()->GetCurrentThread();
Handle timer = InvalidHandle;

{
@@ -148,7 +148,7 @@ void KSynchronizationObject::NotifyAvailable(ResultCode result) {

// Iterate over each thread.
for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
-Thread* thread = cur_node->thread;
+KThread* thread = cur_node->thread;
if (thread->GetState() == ThreadState::Waiting) {
thread->SetSyncedObject(this, result);
thread->SetState(ThreadState::Runnable);
@@ -156,8 +156,8 @@ void KSynchronizationObject::NotifyAvailable(ResultCode result) {
}
}

-std::vector<Thread*> KSynchronizationObject::GetWaitingThreadsForDebugging() const {
-std::vector<Thread*> threads;
+std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() const {
+std::vector<KThread*> threads;

// If debugging, dump the list of waiters.
{
@@ -13,14 +13,14 @@ namespace Kernel {

class KernelCore;
class Synchronization;
-class Thread;
+class KThread;

/// Class that represents a Kernel object that a thread can be waiting on
class KSynchronizationObject : public Object {
public:
struct ThreadListNode {
ThreadListNode* next{};
-Thread* thread{};
+KThread* thread{};
};

[[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index,
@@ -29,7 +29,7 @@ public:

[[nodiscard]] virtual bool IsSignaled() const = 0;

-[[nodiscard]] std::vector<Thread*> GetWaitingThreadsForDebugging() const;
+[[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;

protected:
explicit KSynchronizationObject(KernelCore& kernel);
@@ -20,11 +20,11 @@
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_layout.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -36,14 +36,14 @@

namespace Kernel {

-bool Thread::IsSignaled() const {
+bool KThread::IsSignaled() const {
return signaled;
}

-Thread::Thread(KernelCore& kernel) : KSynchronizationObject{kernel} {}
-Thread::~Thread() = default;
+KThread::KThread(KernelCore& kernel) : KSynchronizationObject{kernel} {}
+KThread::~KThread() = default;

-void Thread::Stop() {
+void KThread::Stop() {
{
KScopedSchedulerLock lock(kernel);
SetState(ThreadState::Terminated);
@@ -62,18 +62,18 @@ void Thread::Stop() {
global_handle = 0;
}

-void Thread::Wakeup() {
+void KThread::Wakeup() {
KScopedSchedulerLock lock(kernel);
SetState(ThreadState::Runnable);
}

-ResultCode Thread::Start() {
+ResultCode KThread::Start() {
KScopedSchedulerLock lock(kernel);
SetState(ThreadState::Runnable);
return RESULT_SUCCESS;
}

-void Thread::CancelWait() {
+void KThread::CancelWait() {
KScopedSchedulerLock lock(kernel);
if (GetState() != ThreadState::Waiting || !is_cancellable) {
is_sync_cancelled = true;
@@ -103,26 +103,26 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
context.fpcr = 0;
}

-std::shared_ptr<Common::Fiber>& Thread::GetHostContext() {
+std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
return host_context;
}

-ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
-std::string name, VAddr entry_point, u32 priority,
-u64 arg, s32 processor_id, VAddr stack_top,
-Process* owner_process) {
+ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
+std::string name, VAddr entry_point,
+u32 priority, u64 arg, s32 processor_id,
+VAddr stack_top, Process* owner_process) {
std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
owner_process, std::move(init_func), init_func_parameter);
}

-ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
-std::string name, VAddr entry_point, u32 priority,
-u64 arg, s32 processor_id, VAddr stack_top,
-Process* owner_process,
-std::function<void(void*)>&& thread_start_func,
-void* thread_start_parameter) {
+ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
+std::string name, VAddr entry_point,
+u32 priority, u64 arg, s32 processor_id,
+VAddr stack_top, Process* owner_process,
+std::function<void(void*)>&& thread_start_func,
+void* thread_start_parameter) {
auto& kernel = system.Kernel();
// Check if priority is in ranged. Lowest priority -> highest priority id.
if (priority > THREADPRIO_LOWEST && ((type_flags & THREADTYPE_IDLE) == 0)) {
@@ -143,7 +143,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
}
}

-std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);
+std::shared_ptr<KThread> thread = std::make_shared<KThread>(kernel);

thread->thread_id = kernel.CreateNewThreadID();
thread->thread_state = ThreadState::Initialized;
@@ -185,10 +185,10 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
thread->host_context =
std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);

-return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
+return MakeResult<std::shared_ptr<KThread>>(std::move(thread));
}

-void Thread::SetBasePriority(u32 priority) {
+void KThread::SetBasePriority(u32 priority) {
ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
"Invalid priority value.");

@@ -201,18 +201,18 @@ void Thread::SetBasePriority(u32 priority) {
RestorePriority(kernel, this);
}

-void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) {
+void KThread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) {
signaling_object = object;
signaling_result = result;
}

-VAddr Thread::GetCommandBufferAddress() const {
+VAddr KThread::GetCommandBufferAddress() const {
// Offset from the start of TLS at which the IPC command buffer begins.
constexpr u64 command_header_offset = 0x80;
return GetTLSAddress() + command_header_offset;
}

-void Thread::SetState(ThreadState state) {
+void KThread::SetState(ThreadState state) {
KScopedSchedulerLock sl(kernel);

// Clear debugging state
@@ -227,7 +227,7 @@ void Thread::SetState(ThreadState state) {
}
}

-void Thread::AddWaiterImpl(Thread* thread) {
+void KThread::AddWaiterImpl(KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());

// Find the right spot to insert the waiter.
@@ -249,7 +249,7 @@ void Thread::AddWaiterImpl(Thread* thread) {
thread->SetLockOwner(this);
}

-void Thread::RemoveWaiterImpl(Thread* thread) {
+void KThread::RemoveWaiterImpl(KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());

// Keep track of how many kernel waiters we have.
@@ -262,7 +262,7 @@ void Thread::RemoveWaiterImpl(Thread* thread) {
thread->SetLockOwner(nullptr);
}

-void Thread::RestorePriority(KernelCore& kernel, Thread* thread) {
+void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());

while (true) {
@@ -295,7 +295,7 @@ void Thread::RestorePriority(KernelCore& kernel, Thread* thread) {
KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);

// Keep the lock owner up to date.
-Thread* lock_owner = thread->GetLockOwner();
+KThread* lock_owner = thread->GetLockOwner();
if (lock_owner == nullptr) {
return;
}
@@ -307,25 +307,25 @@ void Thread::RestorePriority(KernelCore& kernel, Thread* thread) {
}
}

-void Thread::AddWaiter(Thread* thread) {
+void KThread::AddWaiter(KThread* thread) {
AddWaiterImpl(thread);
RestorePriority(kernel, this);
}

-void Thread::RemoveWaiter(Thread* thread) {
+void KThread::RemoveWaiter(KThread* thread) {
RemoveWaiterImpl(thread);
RestorePriority(kernel, this);
}

-Thread* Thread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
+KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());

s32 num_waiters{};
-Thread* next_lock_owner{};
+KThread* next_lock_owner{};
auto it = waiter_list.begin();
while (it != waiter_list.end()) {
if (it->GetAddressKey() == key) {
-Thread* thread = std::addressof(*it);
+KThread* thread = std::addressof(*it);

// Keep track of how many kernel waiters we have.
if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
@@ -357,7 +357,7 @@ Thread* Thread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
return next_lock_owner;
}

-ResultCode Thread::SetActivity(ThreadActivity value) {
+ResultCode KThread::SetActivity(ThreadActivity value) {
KScopedSchedulerLock lock(kernel);

auto sched_status = GetState();
@@ -384,7 +384,7 @@ ResultCode Thread::SetActivity(ThreadActivity value) {
return RESULT_SUCCESS;
}

-ResultCode Thread::Sleep(s64 nanoseconds) {
+ResultCode KThread::Sleep(s64 nanoseconds) {
Handle event_handle{};
{
KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
@@ -399,7 +399,7 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
return RESULT_SUCCESS;
}

-void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
+void KThread::AddSchedulingFlag(ThreadSchedFlags flag) {
const auto old_state = GetRawState();
pausing_state |= static_cast<u32>(flag);
const auto base_scheduling = GetState();
@@ -407,7 +407,7 @@ void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
KScheduler::OnThreadStateChanged(kernel, this, old_state);
}

-void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
+void KThread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
const auto old_state = GetRawState();
pausing_state &= ~static_cast<u32>(flag);
const auto base_scheduling = GetState();
@@ -415,7 +415,7 @@ void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
KScheduler::OnThreadStateChanged(kernel, this, old_state);
}

-ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
+ResultCode KThread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
KScopedSchedulerLock lock(kernel);
const auto HighestSetCore = [](u64 mask, u32 max_cores) {
for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
@ -124,15 +124,15 @@ enum class ThreadWaitReasonForDebugging : u32 {
|
||||
Suspended, ///< Thread is waiting due to process suspension
|
||||
};
|
||||
|
||||
class Thread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
|
||||
class KThread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
|
||||
friend class KScheduler;
|
||||
friend class Process;
|
||||
|
||||
public:
|
||||
explicit Thread(KernelCore& kernel);
|
||||
~Thread() override;
|
||||
explicit KThread(KernelCore& kernel);
|
||||
~KThread() override;
|
||||
|
||||
using MutexWaitingThreads = std::vector<std::shared_ptr<Thread>>;
|
||||
using MutexWaitingThreads = std::vector<std::shared_ptr<KThread>>;
|
||||
|
||||
using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
|
||||
using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
|
||||
@ -149,10 +149,10 @@ public:
|
||||
* @param owner_process The parent process for the thread, if null, it's a kernel thread
|
||||
* @return A shared pointer to the newly created thread
|
||||
*/
|
||||
static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
|
||||
std::string name, VAddr entry_point,
|
||||
u32 priority, u64 arg, s32 processor_id,
|
||||
VAddr stack_top, Process* owner_process);
|
||||
static ResultVal<std::shared_ptr<KThread>> Create(Core::System& system, ThreadType type_flags,
|
||||
std::string name, VAddr entry_point,
|
||||
u32 priority, u64 arg, s32 processor_id,
|
||||
VAddr stack_top, Process* owner_process);
|
||||
|
||||
/**
|
||||
* Creates and returns a new thread. The new thread is immediately scheduled
|
||||
@ -168,12 +168,10 @@ public:
|
||||
* @param thread_start_parameter The parameter which will passed to host context on init
|
||||
* @return A shared pointer to the newly created thread
|
||||
*/
|
||||
static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
|
||||
std::string name, VAddr entry_point,
|
||||
u32 priority, u64 arg, s32 processor_id,
|
||||
VAddr stack_top, Process* owner_process,
|
||||
std::function<void(void*)>&& thread_start_func,
|
||||
void* thread_start_parameter);
|
||||
static ResultVal<std::shared_ptr<KThread>> Create(
|
||||
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
|
||||
u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
|
||||
std::function<void(void*)>&& thread_start_func, void* thread_start_parameter);
|
||||
|
||||
std::string GetName() const override {
|
||||
return name;
|
||||
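For readers following the rename: both Create overloads keep their parameter lists, and only the returned thread type changes. A minimal usage sketch under that assumption, modeled on how SetupMainThread and svcCreateThread call the first overload later in this commit; the thread name, the priority value, and the surrounding variables (system, entry_point, stack_top, owner_process) are illustrative assumptions, not code from the sources:

    // Sketch: spawning a guest thread through the renamed factory function.
    ThreadType type = THREADTYPE_USER;
    auto thread_res = KThread::Create(system, type, "worker", entry_point, /*priority=*/44,
                                      /*arg=*/0, /*processor_id=*/0, stack_top, &owner_process);
    // On success the ResultVal carries the new KThread; Unwrap() extracts it, as the
    // call sites in this commit do.
    std::shared_ptr<KThread> thread = std::move(thread_res).Unwrap();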
@ -387,11 +385,11 @@ public:
return wait_mutex_threads;
}

Thread* GetLockOwner() const {
KThread* GetLockOwner() const {
return lock_owner;
}

void SetLockOwner(Thread* owner) {
void SetLockOwner(KThread* owner) {
lock_owner = owner;
}

@ -485,22 +483,22 @@ public:
next = nullptr;
}

constexpr Thread* GetPrev() const {
constexpr KThread* GetPrev() const {
return prev;
}
constexpr Thread* GetNext() const {
constexpr KThread* GetNext() const {
return next;
}
constexpr void SetPrev(Thread* thread) {
constexpr void SetPrev(KThread* thread) {
prev = thread;
}
constexpr void SetNext(Thread* thread) {
constexpr void SetNext(KThread* thread) {
next = thread;
}

private:
Thread* prev{};
Thread* next{};
KThread* prev{};
KThread* next{};
};

QueueEntry& GetPriorityQueueEntry(s32 core) {
@ -553,11 +551,11 @@ public:
return mutex_wait_address_for_debugging;
}

void AddWaiter(Thread* thread);
void AddWaiter(KThread* thread);

void RemoveWaiter(Thread* thread);
void RemoveWaiter(KThread* thread);

[[nodiscard]] Thread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
[[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);

[[nodiscard]] VAddr GetAddressKey() const {
return address_key;
@ -603,9 +601,9 @@ private:

template <typename T>
requires(
std::same_as<T, Thread> ||
std::same_as<T, KThread> ||
std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs,
const Thread& rhs) {
const KThread& rhs) {
const uintptr_t l_key = lhs.GetConditionVariableKey();
const uintptr_t r_key = rhs.GetConditionVariableKey();

@ -624,7 +622,8 @@ private:
Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};

using ConditionVariableThreadTreeTraits =
Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&Thread::condvar_arbiter_tree_node>;
Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
&KThread::condvar_arbiter_tree_node>;
using ConditionVariableThreadTree =
ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;

@ -679,9 +678,9 @@ public:
private:
void AddSchedulingFlag(ThreadSchedFlags flag);
void RemoveSchedulingFlag(ThreadSchedFlags flag);
void AddWaiterImpl(Thread* thread);
void RemoveWaiterImpl(Thread* thread);
static void RestorePriority(KernelCore& kernel, Thread* thread);
void AddWaiterImpl(KThread* thread);
void RemoveWaiterImpl(KThread* thread);
static void RestorePriority(KernelCore& kernel, KThread* thread);

Common::SpinLock context_guard{};
ThreadContext32 context_32{};
@ -736,7 +735,7 @@ private:
MutexWaitingThreads wait_mutex_threads;

/// Thread that owns the lock that this thread is waiting for.
Thread* lock_owner{};
KThread* lock_owner{};

/// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
Handle global_handle = 0;
@ -772,7 +771,7 @@ private:
u32 address_key_value{};
s32 num_kernel_waiters{};

using WaiterList = boost::intrusive::list<Thread>;
using WaiterList = boost::intrusive::list<KThread>;
WaiterList waiter_list{};
WaiterList pinned_waiter_list{};
@ -29,6 +29,7 @@
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_layout.h"
#include "core/hle/kernel/memory/memory_manager.h"
@ -38,7 +39,6 @@
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/service_thread.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
@ -171,8 +171,8 @@ struct KernelCore::Impl {
const auto type =
static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
auto thread_res =
Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
nullptr, std::move(init_func), init_func_parameter);
KThread::Create(system, type, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
nullptr, std::move(init_func), init_func_parameter);

suspend_threads[i] = std::move(thread_res).Unwrap();
}
@ -236,7 +236,7 @@ struct KernelCore::Impl {
return result;
}
const Kernel::KScheduler& sched = cores[result.host_handle].Scheduler();
const Kernel::Thread* current = sched.GetCurrentThread();
const Kernel::KThread* current = sched.GetCurrentThread();
if (current != nullptr && !current->IsPhantomMode()) {
result.guest_handle = current->GetGlobalHandle();
} else {
@ -342,7 +342,7 @@ struct KernelCore::Impl {
// the release of itself
std::unique_ptr<Common::ThreadWorker> service_thread_manager;

std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
std::array<std::shared_ptr<KThread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};

@ -380,8 +380,8 @@ std::shared_ptr<ResourceLimit> KernelCore::GetSystemResourceLimit() const {
return impl->system_resource_limit;
}

std::shared_ptr<Thread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
return impl->global_handle_table.Get<Thread>(handle);
std::shared_ptr<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
return impl->global_handle_table.Get<KThread>(handle);
}

void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) {
@ -43,7 +43,7 @@ class KScheduler;
class SharedMemory;
class ServiceThread;
class Synchronization;
class Thread;
class KThread;
class TimeManager;

/// Represents a single instance of the kernel.
@ -84,7 +84,7 @@ public:
std::shared_ptr<ResourceLimit> GetSystemResourceLimit() const;

/// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table.
std::shared_ptr<Thread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
std::shared_ptr<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;

/// Adds the given shared pointer to an internal list of active processes.
void AppendNewProcess(std::shared_ptr<Process> process);
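Only the returned pointer type of the accessor above changes; resolving a thread wakeup handle still goes through the kernel's global handle table. A small sketch of a caller, assuming a KernelCore reference named kernel and a Handle named handle from the surrounding context:

    // Sketch: resolving a wakeup handle to its thread after the rename.
    std::shared_ptr<KThread> thread = kernel.RetrieveThreadFromGlobalHandleTable(handle);
    if (thread) {
        // Operate on the thread; a null result means the handle no longer refers to one.
    }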
@ -240,7 +240,7 @@ public:
private:
friend class Object;
friend class Process;
friend class Thread;
friend class KThread;

/// Creates a new object ID, incrementing the internal object ID counter.
u32 CreateNewObjectID();
@ -16,13 +16,13 @@
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_block_manager.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/memory/slab_heap.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/lock.h"
#include "core/memory.h"
#include "core/settings.h"
@ -39,10 +39,10 @@ namespace {
void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) {
const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
ThreadType type = THREADTYPE_USER;
auto thread_res = Thread::Create(system, type, "main", entry_point, priority, 0,
owner_process.GetIdealCore(), stack_top, &owner_process);
auto thread_res = KThread::Create(system, type, "main", entry_point, priority, 0,
owner_process.GetIdealCore(), stack_top, &owner_process);

std::shared_ptr<Thread> thread = std::move(thread_res).Unwrap();
std::shared_ptr<KThread> thread = std::move(thread_res).Unwrap();

// Register 1 must be a handle to the main thread
const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap();
@ -162,11 +162,11 @@ u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
}

void Process::RegisterThread(const Thread* thread) {
void Process::RegisterThread(const KThread* thread) {
thread_list.push_back(thread);
}

void Process::UnregisterThread(const Thread* thread) {
void Process::UnregisterThread(const KThread* thread) {
thread_list.remove(thread);
}

@ -267,7 +267,7 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
void Process::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exiting);

const auto stop_threads = [this](const std::vector<std::shared_ptr<Thread>>& thread_list) {
const auto stop_threads = [this](const std::vector<std::shared_ptr<KThread>>& thread_list) {
for (auto& thread : thread_list) {
if (thread->GetOwnerProcess() != this)
continue;
@ -30,7 +30,7 @@ namespace Kernel {

class KernelCore;
class ResourceLimit;
class Thread;
class KThread;
class TLSPage;

struct CodeSet;
@ -252,17 +252,17 @@ public:
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;

/// Gets the list of all threads created with this process as their owner.
const std::list<const Thread*>& GetThreadList() const {
const std::list<const KThread*>& GetThreadList() const {
return thread_list;
}

/// Registers a thread as being created under this process,
/// adding it to this process' thread list.
void RegisterThread(const Thread* thread);
void RegisterThread(const KThread* thread);

/// Unregisters a thread from this process, removing it
/// from this process' thread list.
void UnregisterThread(const Thread* thread);
void UnregisterThread(const KThread* thread);

/// Clears the signaled state of the process if and only if it's signaled.
///
@ -380,7 +380,7 @@ private:
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};

/// List of threads that are running with this process as their owner.
std::list<const Thread*> thread_list;
std::list<const KThread*> thread_list;

/// Address of the top of the main thread's stack
VAddr main_thread_stack_top{};
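The thread list above stores non-owning pointers, so only the element type is respelled by this commit; traversal code is otherwise untouched. A hedged sketch of a caller walking it, assuming a Process reference named process from the surrounding context:

    // Sketch: iterating the threads registered to a process.
    std::vector<std::string> names;
    for (const KThread* thread : process.GetThreadList()) {
        names.push_back(thread->GetName()); // GetName() is const, so a const pointer suffices
    }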
@ -7,10 +7,10 @@
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/thread.h"

namespace Kernel {

@ -6,10 +6,10 @@
#include "common/assert.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/thread.h"

namespace Kernel {

@ -15,11 +15,11 @@
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
#include "core/memory.h"

namespace Kernel {
@ -116,7 +116,7 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
return RESULT_SUCCESS;
}

ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread,
ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<KThread> thread,
Core::Memory::Memory& memory) {
u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
auto context =
@ -161,7 +161,7 @@ ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
return result;
}

ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<KThread> thread,
Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing) {
return QueueSyncRequest(std::move(thread), memory);
@ -29,7 +29,7 @@ class HLERequestContext;
class KernelCore;
class Session;
class SessionRequestHandler;
class Thread;
class KThread;

/**
* Kernel object representing the server endpoint of an IPC session. Sessions are the basic CTR-OS
@ -95,7 +95,7 @@ public:
*
* @returns ResultCode from the operation.
*/
ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
ResultCode HandleSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing);

/// Called when a client disconnection occurs.
@ -128,7 +128,7 @@ public:

private:
/// Queues a sync request from the emulated application.
ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
ResultCode QueueSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory);

/// Completes a sync request from the emulated application.
ResultCode CompleteSyncRequest(HLERequestContext& context);
@ -149,12 +149,12 @@ private:
/// List of threads that are pending a response after a sync request. This list is processed in
/// a LIFO manner, thus, the last request will be dispatched first.
/// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
std::vector<std::shared_ptr<Thread>> pending_requesting_threads;
std::vector<std::shared_ptr<KThread>> pending_requesting_threads;

/// Thread whose request is currently being handled. A request is considered "handled" when a
/// response is sent via svcReplyAndReceive.
/// TODO(Subv): Find a better name for this.
std::shared_ptr<Thread> currently_handling;
std::shared_ptr<KThread> currently_handling;

/// When set to True, converts the session to a domain at the end of the command
bool convert_to_domain{};
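The comment above describes the pending list as LIFO (the last request is dispatched first). As an illustration only, and not code from this commit, that ordering corresponds to the usual back-of-the-vector pattern on the renamed members:

    // Sketch: LIFO dispatch of pending requesters.
    if (!pending_requesting_threads.empty()) {
        currently_handling = pending_requesting_threads.back(); // newest request first
        pending_requesting_threads.pop_back();
    }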
@ -29,6 +29,7 @@
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/memory/memory_layout.h"
@ -42,7 +43,6 @@
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/kernel/svc_wrap.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/kernel/transfer_memory.h"
#include "core/hle/kernel/writable_event.h"
@ -363,7 +363,7 @@ static ResultCode GetThreadId(Core::System& system, u64* thread_id, Handle threa
LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle);

const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", thread_handle);
return ERR_INVALID_HANDLE;
@ -395,7 +395,7 @@ static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle han
return RESULT_SUCCESS;
}

const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle);
const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
if (thread) {
const Process* const owner_process = thread->GetOwnerProcess();
if (!owner_process) {
@ -474,7 +474,7 @@ static ResultCode CancelSynchronization(Core::System& system, Handle thread_hand
LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle);

const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
thread_handle);
@ -872,7 +872,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
return ERR_INVALID_COMBINATION;
}

const auto thread = system.Kernel().CurrentProcess()->GetHandleTable().Get<Thread>(
const auto thread = system.Kernel().CurrentProcess()->GetHandleTable().Get<KThread>(
static_cast<Handle>(handle));
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
@ -1033,7 +1033,7 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act
}

const auto* current_process = system.Kernel().CurrentProcess();
const std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
const std::shared_ptr<KThread> thread = current_process->GetHandleTable().Get<KThread>(handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
return ERR_INVALID_HANDLE;
@ -1066,7 +1066,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H
LOG_DEBUG(Kernel_SVC, "called, context=0x{:08X}, thread=0x{:X}", thread_context, handle);

const auto* current_process = system.Kernel().CurrentProcess();
const std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
const std::shared_ptr<KThread> thread = current_process->GetHandleTable().Get<KThread>(handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
return ERR_INVALID_HANDLE;
@ -1111,7 +1111,7 @@ static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle
LOG_TRACE(Kernel_SVC, "called");

const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle);
const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
if (!thread) {
*priority = 0;
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
@ -1140,7 +1140,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri

const auto* const current_process = system.Kernel().CurrentProcess();

std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
std::shared_ptr<KThread> thread = current_process->GetHandleTable().Get<KThread>(handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
return ERR_INVALID_HANDLE;
@ -1489,9 +1489,9 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1));

ThreadType type = THREADTYPE_USER;
CASCADE_RESULT(std::shared_ptr<Thread> thread,
Thread::Create(system, type, "", entry_point, priority, arg, processor_id,
stack_top, current_process));
CASCADE_RESULT(std::shared_ptr<KThread> thread,
KThread::Create(system, type, "", entry_point, priority, arg, processor_id,
stack_top, current_process));

const auto new_thread_handle = current_process->GetHandleTable().Create(thread);
if (new_thread_handle.Failed()) {
@ -1518,7 +1518,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);

const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
thread_handle);
@ -1844,7 +1844,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);

const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
thread_handle);
@ -1914,7 +1914,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
}

const auto& handle_table = current_process->GetHandleTable();
const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
thread_handle);
@ -8,8 +8,8 @@
#include "core/core_timing_util.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"

namespace Kernel {
@ -18,7 +18,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
time_manager_event_type = Core::Timing::CreateEvent(
"Kernel::TimeManagerCallback",
[this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
std::shared_ptr<Thread> thread;
std::shared_ptr<KThread> thread;
{
std::lock_guard lock{mutex};
const auto proper_handle = static_cast<Handle>(thread_handle);
@ -35,7 +35,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
});
}

void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) {
void TimeManager::ScheduleTimeEvent(Handle& event_handle, KThread* timetask, s64 nanoseconds) {
std::lock_guard lock{mutex};
event_handle = timetask->GetGlobalHandle();
if (nanoseconds > 0) {
@ -58,7 +58,7 @@ void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
cancelled_events[event_handle] = true;
}

void TimeManager::CancelTimeEvent(Thread* time_task) {
void TimeManager::CancelTimeEvent(KThread* time_task) {
std::lock_guard lock{mutex};
const Handle event_handle = time_task->GetGlobalHandle();
UnscheduleTimeEvent(event_handle);
@ -20,7 +20,7 @@ struct EventType;

namespace Kernel {

class Thread;
class KThread;

/**
* The `TimeManager` takes care of scheduling time events on threads and executes their TimeUp
@ -32,12 +32,12 @@ public:

/// Schedule a time event on `timetask` thread that will expire in 'nanoseconds'
/// returns a non-invalid handle in `event_handle` if correctly scheduled
void ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds);
void ScheduleTimeEvent(Handle& event_handle, KThread* timetask, s64 nanoseconds);

/// Unschedule an existing time event
void UnscheduleTimeEvent(Handle event_handle);

void CancelTimeEvent(Thread* time_task);
void CancelTimeEvent(KThread* time_task);

private:
Core::System& system;
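These declarations only swap the thread pointer type; the schedule/cancel pairing works as before. A brief sketch under that assumption, where time_manager and thread are assumed to come from the surrounding context and the 1 ms timeout is purely illustrative:

    // Sketch: arm a timeout for `thread`, then cancel it if the wait finishes early.
    Handle timeout_event{};
    time_manager.ScheduleTimeEvent(timeout_event, thread, 1'000'000); // duration in nanoseconds
    // ... the thread is woken for another reason ...
    time_manager.CancelTimeEvent(thread);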
@ -4,10 +4,10 @@

#include <algorithm>
#include "common/assert.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/writable_event.h"

namespace Kernel {
@ -8,9 +8,9 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/lock.h"
#include "core/hle/service/nfp/nfp.h"
@ -6,9 +6,9 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/service/nvdrv/interface.h"
#include "core/hle/service/nvdrv/nvdata.h"
@ -11,10 +11,10 @@
#include "core/hle/ipc.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/service/acc/acc.h"
#include "core/hle/service/am/am.h"
#include "core/hle/service/aoc/aoc_u.h"
@ -13,7 +13,7 @@
#include "common/microprofile.h"
#include "common/thread.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/service/sockets/bsd.h"
#include "core/hle/service/sockets/sockets_translate.h"
#include "core/network/network.h"
@ -121,7 +121,7 @@ private:
};

ResultCode Module::Interface::GetClockSnapshotFromSystemClockContextInternal(
Kernel::Thread* thread, Clock::SystemClockContext user_context,
Kernel::KThread* thread, Clock::SystemClockContext user_context,
Clock::SystemClockContext network_context, u8 type, Clock::ClockSnapshot& clock_snapshot) {

auto& time_manager{system.GetTimeManager()};
@ -39,7 +39,7 @@ public:

private:
ResultCode GetClockSnapshotFromSystemClockContextInternal(
Kernel::Thread* thread, Clock::SystemClockContext user_context,
Kernel::KThread* thread, Clock::SystemClockContext user_context,
Clock::SystemClockContext network_context, u8 type,
Clock::ClockSnapshot& cloc_snapshot);

@ -6,8 +6,8 @@

#include "common/common_types.h"
#include "common/uuid.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/service/time/clock_types.h"

namespace Service::Time {
@ -18,8 +18,8 @@
#include "common/swap.h"
#include "core/core_timing.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/service/nvdrv/nvdata.h"
#include "core/hle/service/nvdrv/nvdrv.h"
@ -15,9 +15,9 @@
#include "core/file_sys/romfs_factory.h"
#include "core/file_sys/vfs_offset.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/service/filesystem/filesystem.h"
#include "core/loader/nro.h"
#include "core/loader/nso.h"
@ -15,9 +15,9 @@
#include "core/core.h"
#include "core/file_sys/patch_manager.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/loader/nso.h"
#include "core/memory.h"
#include "core/settings.h"
@ -15,10 +15,10 @@
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/thread.h"
#include "core/memory.h"

namespace {
@ -90,7 +90,7 @@ std::size_t WaitTreeItem::Row() const {
std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList() {
std::vector<std::unique_ptr<WaitTreeThread>> item_list;
std::size_t row = 0;
auto add_threads = [&](const std::vector<std::shared_ptr<Kernel::Thread>>& threads) {
auto add_threads = [&](const std::vector<std::shared_ptr<Kernel::KThread>>& threads) {
for (std::size_t i = 0; i < threads.size(); ++i) {
if (!threads[i]->IsHLEThread()) {
item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i]));
@ -117,7 +117,7 @@ WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address, const Kernel::HandleTa
: mutex_address(mutex_address) {
mutex_value = Core::System::GetInstance().Memory().Read32(mutex_address);
owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Svc::HandleWaitMask);
owner = handle_table.Get<Kernel::Thread>(owner_handle);
owner = handle_table.Get<Kernel::KThread>(owner_handle);
}

WaitTreeMutexInfo::~WaitTreeMutexInfo() = default;
@ -139,7 +139,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexInfo::GetChildren() cons
return list;
}

WaitTreeCallstack::WaitTreeCallstack(const Kernel::Thread& thread) : thread(thread) {}
WaitTreeCallstack::WaitTreeCallstack(const Kernel::KThread& thread) : thread(thread) {}
WaitTreeCallstack::~WaitTreeCallstack() = default;

QString WaitTreeCallstack::GetText() const {
@ -194,7 +194,7 @@ std::unique_ptr<WaitTreeSynchronizationObject> WaitTreeSynchronizationObject::ma
case Kernel::HandleType::ReadableEvent:
return std::make_unique<WaitTreeEvent>(static_cast<const Kernel::ReadableEvent&>(object));
case Kernel::HandleType::Thread:
return std::make_unique<WaitTreeThread>(static_cast<const Kernel::Thread&>(object));
return std::make_unique<WaitTreeThread>(static_cast<const Kernel::KThread&>(object));
default:
return std::make_unique<WaitTreeSynchronizationObject>(object);
}
@ -231,12 +231,12 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeObjectList::GetChildren() con
return list;
}

WaitTreeThread::WaitTreeThread(const Kernel::Thread& thread)
WaitTreeThread::WaitTreeThread(const Kernel::KThread& thread)
: WaitTreeSynchronizationObject(thread) {}
WaitTreeThread::~WaitTreeThread() = default;

QString WaitTreeThread::GetText() const {
const auto& thread = static_cast<const Kernel::Thread&>(object);
const auto& thread = static_cast<const Kernel::KThread&>(object);
QString status;
switch (thread.GetState()) {
case Kernel::ThreadState::Runnable:
@ -297,7 +297,7 @@ QString WaitTreeThread::GetText() const {
QColor WaitTreeThread::GetColor() const {
const std::size_t color_index = IsDarkTheme() ? 1 : 0;

const auto& thread = static_cast<const Kernel::Thread&>(object);
const auto& thread = static_cast<const Kernel::KThread&>(object);
switch (thread.GetState()) {
case Kernel::ThreadState::Runnable:
if (!thread.IsPaused()) {
@ -336,7 +336,7 @@ QColor WaitTreeThread::GetColor() const {
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeSynchronizationObject::GetChildren());

const auto& thread = static_cast<const Kernel::Thread&>(object);
const auto& thread = static_cast<const Kernel::KThread&>(object);

QString processor;
switch (thread.GetProcessorID()) {
@ -390,7 +390,7 @@ WaitTreeEvent::WaitTreeEvent(const Kernel::ReadableEvent& object)
: WaitTreeSynchronizationObject(object) {}
WaitTreeEvent::~WaitTreeEvent() = default;

WaitTreeThreadList::WaitTreeThreadList(const std::vector<Kernel::Thread*>& list)
WaitTreeThreadList::WaitTreeThreadList(const std::vector<Kernel::KThread*>& list)
: thread_list(list) {}
WaitTreeThreadList::~WaitTreeThreadList() = default;

@ -19,8 +19,8 @@ class EmuThread;
namespace Kernel {
class HandleTable;
class KSynchronizationObject;
class KThread;
class ReadableEvent;
class Thread;
} // namespace Kernel

class WaitTreeThread;
@ -83,20 +83,20 @@ private:
VAddr mutex_address;
u32 mutex_value;
Kernel::Handle owner_handle;
std::shared_ptr<Kernel::Thread> owner;
std::shared_ptr<Kernel::KThread> owner;
};

class WaitTreeCallstack : public WaitTreeExpandableItem {
Q_OBJECT
public:
explicit WaitTreeCallstack(const Kernel::Thread& thread);
explicit WaitTreeCallstack(const Kernel::KThread& thread);
~WaitTreeCallstack() override;

QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;

private:
const Kernel::Thread& thread;
const Kernel::KThread& thread;
};

class WaitTreeSynchronizationObject : public WaitTreeExpandableItem {
@ -131,7 +131,7 @@ private:
class WaitTreeThread : public WaitTreeSynchronizationObject {
Q_OBJECT
public:
explicit WaitTreeThread(const Kernel::Thread& thread);
explicit WaitTreeThread(const Kernel::KThread& thread);
~WaitTreeThread() override;

QString GetText() const override;
@ -149,14 +149,14 @@ public:
class WaitTreeThreadList : public WaitTreeExpandableItem {
Q_OBJECT
public:
explicit WaitTreeThreadList(const std::vector<Kernel::Thread*>& list);
explicit WaitTreeThreadList(const std::vector<Kernel::KThread*>& list);
~WaitTreeThreadList() override;

QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;

private:
const std::vector<Kernel::Thread*>& thread_list;
const std::vector<Kernel::KThread*>& thread_list;
};

class WaitTreeModel : public QAbstractItemModel {