mirror of https://github.com/yuzu-emu/yuzu-android (synced 2024-12-23 17:11:21 -08:00)
kernel/thread: Make all instance variables private
Many of the member variables of the Thread class aren't even used outside of the class itself, so there's no need to make them public. This change follows in the footsteps of the previous changes that made other kernel types' members private. The main motivation is that the Thread class will likely change in the future as emulation becomes more accurate, and letting random bits of the emulator access its data members directly makes it a pain to shuffle around and/or modify internals. Having all data members public also makes it difficult to reason about certain bits of behavior without first verifying which parts of the core actually use them, and it encourages changes to be made in completely unrelated translation units when they would be better introduced as additions to the Thread class' public interface.
parent bc679c9b8c
commit baed7e1fba
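As an illustration of the pattern described in the commit message, here is a minimal sketch (not code from this commit; the class names and the single member shown are hypothetical stand-ins) of turning a public data member into a private one behind a small accessor interface:

    #include <cstdint>

    // Before: the field is public, so any translation unit can mutate it directly
    // and every caller has to maintain the invariants itself.
    class ThreadBefore {
    public:
        std::uint32_t current_priority = 0;
    };

    // After: the field is private; reads and writes funnel through one place,
    // which is where validation or bookkeeping can later be added.
    class ThreadAfter {
    public:
        std::uint32_t GetPriority() const {
            return current_priority;
        }
        void SetPriority(std::uint32_t priority) {
            current_priority = priority; // single choke point for future invariants
        }

    private:
        std::uint32_t current_priority = 0;
    };

The diff below applies this treatment to Kernel::Thread and updates every caller accordingly.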
@@ -86,7 +86,7 @@ public:
parent.jit->HaltExecution();
parent.SetPC(pc);
Kernel::Thread* thread = Kernel::GetCurrentThread();
- parent.SaveContext(thread->context);
+ parent.SaveContext(thread->GetContext());
GDBStub::Break();
GDBStub::SendTrap(thread, 5);
return;
@@ -195,7 +195,7 @@ void ARM_Unicorn::ExecuteInstructions(int num_instructions) {
uc_reg_write(uc, UC_ARM64_REG_PC, &last_bkpt.address);
}
Kernel::Thread* thread = Kernel::GetCurrentThread();
- SaveContext(thread->context);
+ SaveContext(thread->GetContext());
if (last_bkpt_hit || GDBStub::GetCpuStepFlag()) {
last_bkpt_hit = false;
GDBStub::Break();
@@ -209,7 +209,7 @@ static Kernel::Thread* FindThreadById(int id) {
for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) {
const auto& threads = Core::System::GetInstance().Scheduler(core)->GetThreadList();
for (auto& thread : threads) {
- if (thread->GetThreadId() == static_cast<u32>(id)) {
+ if (thread->GetThreadID() == static_cast<u32>(id)) {
current_core = core;
return thread.get();
}
@@ -223,16 +223,18 @@ static u64 RegRead(std::size_t id, Kernel::Thread* thread = nullptr) {
return 0;
}

+ const auto& thread_context = thread->GetContext();
+
if (id < SP_REGISTER) {
- return thread->context.cpu_registers[id];
+ return thread_context.cpu_registers[id];
} else if (id == SP_REGISTER) {
- return thread->context.sp;
+ return thread_context.sp;
} else if (id == PC_REGISTER) {
- return thread->context.pc;
+ return thread_context.pc;
} else if (id == PSTATE_REGISTER) {
- return thread->context.pstate;
+ return thread_context.pstate;
} else if (id > PSTATE_REGISTER && id < FPCR_REGISTER) {
- return thread->context.vector_registers[id - UC_ARM64_REG_Q0][0];
+ return thread_context.vector_registers[id - UC_ARM64_REG_Q0][0];
} else {
return 0;
}
@@ -243,16 +245,18 @@ static void RegWrite(std::size_t id, u64 val, Kernel::Thread* thread = nullptr)
return;
}

+ auto& thread_context = thread->GetContext();
+
if (id < SP_REGISTER) {
- thread->context.cpu_registers[id] = val;
+ thread_context.cpu_registers[id] = val;
} else if (id == SP_REGISTER) {
- thread->context.sp = val;
+ thread_context.sp = val;
} else if (id == PC_REGISTER) {
- thread->context.pc = val;
+ thread_context.pc = val;
} else if (id == PSTATE_REGISTER) {
- thread->context.pstate = static_cast<u32>(val);
+ thread_context.pstate = static_cast<u32>(val);
} else if (id > PSTATE_REGISTER && id < FPCR_REGISTER) {
- thread->context.vector_registers[id - (PSTATE_REGISTER + 1)][0] = val;
+ thread_context.vector_registers[id - (PSTATE_REGISTER + 1)][0] = val;
}
}
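The RegRead and RegWrite hunks above also cache the context object in a local reference rather than going through the accessor for every field. A minimal sketch of that pattern from a caller's point of view (the helper itself is hypothetical; it assumes the GetContext() accessor introduced by this commit returns a reference to the thread's register state):

    // Hypothetical helper, not part of this commit.
    void SetReturnValueAndEntryPoint(Kernel::Thread& thread, u64 value, u64 entry) {
        auto& ctx = thread.GetContext(); // one lookup, several field accesses
        ctx.cpu_registers[0] = value;
        ctx.pc = entry;
    }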
@@ -595,7 +599,7 @@ static void HandleQuery() {
for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) {
const auto& threads = Core::System::GetInstance().Scheduler(core)->GetThreadList();
for (const auto& thread : threads) {
- val += fmt::format("{:x}", thread->GetThreadId());
+ val += fmt::format("{:x}", thread->GetThreadID());
val += ",";
}
}
@@ -612,7 +616,7 @@ static void HandleQuery() {
for (const auto& thread : threads) {
buffer +=
fmt::format(R"*(<thread id="{:x}" core="{:d}" name="Thread {:x}"></thread>)*",
- thread->GetThreadId(), core, thread->GetThreadId());
+ thread->GetThreadID(), core, thread->GetThreadID());
}
}
buffer += "</threads>";
@@ -693,7 +697,7 @@ static void SendSignal(Kernel::Thread* thread, u32 signal, bool full = true) {
}

if (thread) {
- buffer += fmt::format(";thread:{:x};", thread->GetThreadId());
+ buffer += fmt::format(";thread:{:x};", thread->GetThreadID());
}

SendReply(buffer.c_str());
@@ -857,7 +861,9 @@ static void WriteRegister() {
}

// Update Unicorn context skipping scheduler, no running threads at this point
- Core::System::GetInstance().ArmInterface(current_core).LoadContext(current_thread->context);
+ Core::System::GetInstance()
+ .ArmInterface(current_core)
+ .LoadContext(current_thread->GetContext());

SendReply("OK");
}
@@ -886,7 +892,9 @@ static void WriteRegisters() {
}

// Update Unicorn context skipping scheduler, no running threads at this point
- Core::System::GetInstance().ArmInterface(current_core).LoadContext(current_thread->context);
+ Core::System::GetInstance()
+ .ArmInterface(current_core)
+ .LoadContext(current_thread->GetContext());

SendReply("OK");
}
@@ -960,7 +968,9 @@ static void Step() {
if (command_length > 1) {
RegWrite(PC_REGISTER, GdbHexToLong(command_buffer + 1), current_thread);
// Update Unicorn context skipping scheduler, no running threads at this point
- Core::System::GetInstance().ArmInterface(current_core).LoadContext(current_thread->context);
+ Core::System::GetInstance()
+ .ArmInterface(current_core)
+ .LoadContext(current_thread->GetContext());
}
step_loop = true;
halt_loop = true;
@@ -23,13 +23,13 @@ namespace AddressArbiter {
// Performs actual address waiting logic.
static ResultCode WaitForAddress(VAddr address, s64 timeout) {
SharedPtr<Thread> current_thread = GetCurrentThread();
- current_thread->arb_wait_address = address;
- current_thread->status = ThreadStatus::WaitArb;
- current_thread->wakeup_callback = nullptr;
+ current_thread->SetArbiterWaitAddress(address);
+ current_thread->SetStatus(ThreadStatus::WaitArb);
+ current_thread->InvalidateWakeupCallback();

current_thread->WakeAfterDelay(timeout);

- Core::System::GetInstance().CpuCore(current_thread->processor_id).PrepareReschedule();
+ Core::System::GetInstance().CpuCore(current_thread->GetProcessorID()).PrepareReschedule();
return RESULT_TIMEOUT;
}

@@ -39,10 +39,10 @@ static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address)
std::vector<SharedPtr<Thread>>& waiting_threads,
VAddr arb_addr) {
const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
- auto& thread_list = scheduler->GetThreadList();
+ const auto& thread_list = scheduler->GetThreadList();

- for (auto& thread : thread_list) {
- if (thread->arb_wait_address == arb_addr)
+ for (const auto& thread : thread_list) {
+ if (thread->GetArbiterWaitAddress() == arb_addr)
waiting_threads.push_back(thread);
}
};
@@ -57,7 +57,7 @@ static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address)
// Sort them by priority, such that the highest priority ones come first.
std::sort(threads.begin(), threads.end(),
[](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) {
- return lhs->current_priority < rhs->current_priority;
+ return lhs->GetPriority() < rhs->GetPriority();
});

return threads;
@@ -73,9 +73,9 @@ static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num

// Signal the waiting threads.
for (std::size_t i = 0; i < last; i++) {
- ASSERT(waiting_threads[i]->status == ThreadStatus::WaitArb);
+ ASSERT(waiting_threads[i]->GetStatus() == ThreadStatus::WaitArb);
waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
- waiting_threads[i]->arb_wait_address = 0;
+ waiting_threads[i]->SetArbiterWaitAddress(0);
waiting_threads[i]->ResumeFromWait();
}
}
@@ -42,14 +42,14 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
Kernel::SharedPtr<Kernel::Event> event) {

// Put the client thread to sleep until the wait event is signaled or the timeout expires.
- thread->wakeup_callback = [context = *this, callback](
+ thread->SetWakeupCallback([context = *this, callback](
ThreadWakeupReason reason, SharedPtr<Thread> thread,
SharedPtr<WaitObject> object, std::size_t index) mutable -> bool {
- ASSERT(thread->status == ThreadStatus::WaitHLEEvent);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitHLEEvent);
callback(thread, context, reason);
context.WriteToOutgoingCommandBuffer(*thread);
return true;
- };
+ });

if (!event) {
// Create event if not provided
@@ -59,8 +59,8 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
}

event->Clear();
- thread->status = ThreadStatus::WaitHLEEvent;
- thread->wait_objects = {event};
+ thread->SetStatus(ThreadStatus::WaitHLEEvent);
+ thread->SetWaitObjects({event});
event->AddWaitingThread(thread);

if (timeout > 0) {
@@ -209,7 +209,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(u32_le* src_cmdb

ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread) {
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> dst_cmdbuf;
- Memory::ReadBlock(*thread.owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(),
+ Memory::ReadBlock(*thread.GetOwnerProcess(), thread.GetTLSAddress(), dst_cmdbuf.data(),
dst_cmdbuf.size() * sizeof(u32));

// The header was already built in the internal command buffer. Attempt to parse it to verify
@@ -268,7 +268,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
}

// Copy the translated command buffer back into the thread's command buffer area.
- Memory::WriteBlock(*thread.owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(),
+ Memory::WriteBlock(*thread.GetOwnerProcess(), thread.GetTLSAddress(), dst_cmdbuf.data(),
dst_cmdbuf.size() * sizeof(u32));

return RESULT_SUCCESS;
@@ -46,40 +46,40 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_

bool resume = true;

- if (thread->status == ThreadStatus::WaitSynchAny ||
- thread->status == ThreadStatus::WaitSynchAll ||
- thread->status == ThreadStatus::WaitHLEEvent) {
+ if (thread->GetStatus() == ThreadStatus::WaitSynchAny ||
+ thread->GetStatus() == ThreadStatus::WaitSynchAll ||
+ thread->GetStatus() == ThreadStatus::WaitHLEEvent) {
// Remove the thread from each of its waiting objects' waitlists
- for (auto& object : thread->wait_objects) {
+ for (const auto& object : thread->GetWaitObjects()) {
object->RemoveWaitingThread(thread.get());
}
- thread->wait_objects.clear();
+ thread->ClearWaitObjects();

// Invoke the wakeup callback before clearing the wait objects
- if (thread->wakeup_callback) {
- resume = thread->wakeup_callback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
+ if (thread->HasWakeupCallback()) {
+ resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
}
}

- if (thread->mutex_wait_address != 0 || thread->condvar_wait_address != 0 ||
- thread->wait_handle) {
- ASSERT(thread->status == ThreadStatus::WaitMutex);
- thread->mutex_wait_address = 0;
- thread->condvar_wait_address = 0;
- thread->wait_handle = 0;
+ if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 ||
+ thread->GetWaitHandle() != 0) {
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+ thread->SetMutexWaitAddress(0);
+ thread->SetCondVarWaitAddress(0);
+ thread->SetWaitHandle(0);

- auto lock_owner = thread->lock_owner;
+ auto* const lock_owner = thread->GetLockOwner();
// Threads waking up by timeout from WaitProcessWideKey do not perform priority inheritance
// and don't have a lock owner unless SignalProcessWideKey was called first and the thread
// wasn't awakened due to the mutex already being acquired.
- if (lock_owner) {
+ if (lock_owner != nullptr) {
lock_owner->RemoveMutexWaiter(thread);
}
}

- if (thread->arb_wait_address != 0) {
- ASSERT(thread->status == ThreadStatus::WaitArb);
- thread->arb_wait_address = 0;
+ if (thread->GetArbiterWaitAddress() != 0) {
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitArb);
+ thread->SetArbiterWaitAddress(0);
}

if (resume) {
@@ -28,11 +28,11 @@ static std::pair<SharedPtr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
SharedPtr<Thread> highest_priority_thread;
u32 num_waiters = 0;

- for (auto& thread : current_thread->wait_mutex_threads) {
- if (thread->mutex_wait_address != mutex_addr)
+ for (const auto& thread : current_thread->GetMutexWaitingThreads()) {
+ if (thread->GetMutexWaitAddress() != mutex_addr)
continue;

- ASSERT(thread->status == ThreadStatus::WaitMutex);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);

++num_waiters;
if (highest_priority_thread == nullptr ||
@@ -47,12 +47,12 @@ static std::pair<SharedPtr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
static void TransferMutexOwnership(VAddr mutex_addr, SharedPtr<Thread> current_thread,
SharedPtr<Thread> new_owner) {
- auto threads = current_thread->wait_mutex_threads;
- for (auto& thread : threads) {
- if (thread->mutex_wait_address != mutex_addr)
+ const auto& threads = current_thread->GetMutexWaitingThreads();
+ for (const auto& thread : threads) {
+ if (thread->GetMutexWaitAddress() != mutex_addr)
continue;

- ASSERT(thread->lock_owner == current_thread);
+ ASSERT(thread->GetLockOwner() == current_thread);
current_thread->RemoveMutexWaiter(thread);
if (new_owner != thread)
new_owner->AddMutexWaiter(thread);
@@ -84,11 +84,11 @@ ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle ho
return ERR_INVALID_HANDLE;

// Wait until the mutex is released
- GetCurrentThread()->mutex_wait_address = address;
- GetCurrentThread()->wait_handle = requesting_thread_handle;
+ GetCurrentThread()->SetMutexWaitAddress(address);
+ GetCurrentThread()->SetWaitHandle(requesting_thread_handle);

- GetCurrentThread()->status = ThreadStatus::WaitMutex;
- GetCurrentThread()->wakeup_callback = nullptr;
+ GetCurrentThread()->SetStatus(ThreadStatus::WaitMutex);
+ GetCurrentThread()->InvalidateWakeupCallback();

// Update the lock holder thread's priority to prevent priority inversion.
holding_thread->AddMutexWaiter(GetCurrentThread());
@@ -115,7 +115,7 @@ ResultCode Mutex::Release(VAddr address) {
// Transfer the ownership of the mutex from the previous owner to the new one.
TransferMutexOwnership(address, GetCurrentThread(), thread);

- u32 mutex_value = thread->wait_handle;
+ u32 mutex_value = thread->GetWaitHandle();

if (num_waiters >= 2) {
// Notify the guest that there are still some threads waiting for the mutex
@@ -125,13 +125,13 @@ ResultCode Mutex::Release(VAddr address) {
// Grant the mutex to the next waiting thread and resume it.
Memory::Write32(address, mutex_value);

- ASSERT(thread->status == ThreadStatus::WaitMutex);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
thread->ResumeFromWait();

- thread->lock_owner = nullptr;
- thread->condvar_wait_address = 0;
- thread->mutex_wait_address = 0;
- thread->wait_handle = 0;
+ thread->SetLockOwner(nullptr);
+ thread->SetCondVarWaitAddress(0);
+ thread->SetMutexWaitAddress(0);
+ thread->SetWaitHandle(0);

return RESULT_SUCCESS;
}
@@ -144,15 +144,15 @@ void Process::PrepareForTermination() {

const auto stop_threads = [this](const std::vector<SharedPtr<Thread>>& thread_list) {
for (auto& thread : thread_list) {
- if (thread->owner_process != this)
+ if (thread->GetOwnerProcess() != this)
continue;

if (thread == GetCurrentThread())
continue;

// TODO(Subv): When are the other running/ready threads terminated?
- ASSERT_MSG(thread->status == ThreadStatus::WaitSynchAny ||
- thread->status == ThreadStatus::WaitSynchAll,
+ ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynchAny ||
+ thread->GetStatus() == ThreadStatus::WaitSynchAll,
"Exiting processes with non-waiting threads is currently unimplemented");

thread->Stop();
@@ -38,10 +38,10 @@ Thread* Scheduler::PopNextReadyThread() {
Thread* next = nullptr;
Thread* thread = GetCurrentThread();

- if (thread && thread->status == ThreadStatus::Running) {
+ if (thread && thread->GetStatus() == ThreadStatus::Running) {
// We have to do better than the current thread.
// This call returns null when that's not possible.
- next = ready_queue.pop_first_better(thread->current_priority);
+ next = ready_queue.pop_first_better(thread->GetPriority());
if (!next) {
// Otherwise just keep going with the current thread
next = thread;
@@ -58,22 +58,21 @@ void Scheduler::SwitchContext(Thread* new_thread) {

// Save context for previous thread
if (previous_thread) {
- previous_thread->last_running_ticks = CoreTiming::GetTicks();
- cpu_core.SaveContext(previous_thread->context);
+ cpu_core.SaveContext(previous_thread->GetContext());
// Save the TPIDR_EL0 system register in case it was modified.
- previous_thread->tpidr_el0 = cpu_core.GetTPIDR_EL0();
+ previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());

- if (previous_thread->status == ThreadStatus::Running) {
+ if (previous_thread->GetStatus() == ThreadStatus::Running) {
// This is only the case when a reschedule is triggered without the current thread
// yielding execution (i.e. an event triggered, system core time-sliced, etc)
- ready_queue.push_front(previous_thread->current_priority, previous_thread);
- previous_thread->status = ThreadStatus::Ready;
+ ready_queue.push_front(previous_thread->GetPriority(), previous_thread);
+ previous_thread->SetStatus(ThreadStatus::Ready);
}
}

// Load context of new thread
if (new_thread) {
- ASSERT_MSG(new_thread->status == ThreadStatus::Ready,
+ ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
"Thread must be ready to become running.");

// Cancel any outstanding wakeup events for this thread
@@ -83,15 +82,16 @@ void Scheduler::SwitchContext(Thread* new_thread) {

current_thread = new_thread;

- ready_queue.remove(new_thread->current_priority, new_thread);
- new_thread->status = ThreadStatus::Running;
+ ready_queue.remove(new_thread->GetPriority(), new_thread);
+ new_thread->SetStatus(ThreadStatus::Running);

- if (previous_process != current_thread->owner_process) {
- Core::CurrentProcess() = current_thread->owner_process;
+ const auto thread_owner_process = current_thread->GetOwnerProcess();
+ if (previous_process != thread_owner_process) {
+ Core::CurrentProcess() = thread_owner_process;
SetCurrentPageTable(&Core::CurrentProcess()->VMManager().page_table);
}

- cpu_core.LoadContext(new_thread->context);
+ cpu_core.LoadContext(new_thread->GetContext());
cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
@@ -136,14 +136,14 @@ void Scheduler::RemoveThread(Thread* thread) {
void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
std::lock_guard<std::mutex> lock(scheduler_mutex);

- ASSERT(thread->status == ThreadStatus::Ready);
+ ASSERT(thread->GetStatus() == ThreadStatus::Ready);
ready_queue.push_back(priority, thread);
}

void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
std::lock_guard<std::mutex> lock(scheduler_mutex);

- ASSERT(thread->status == ThreadStatus::Ready);
+ ASSERT(thread->GetStatus() == ThreadStatus::Ready);
ready_queue.remove(priority, thread);
}

@@ -151,8 +151,8 @@ void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
std::lock_guard<std::mutex> lock(scheduler_mutex);

// If thread was ready, adjust queues
- if (thread->status == ThreadStatus::Ready)
- ready_queue.move(thread, thread->current_priority, priority);
+ if (thread->GetStatus() == ThreadStatus::Ready)
+ ready_queue.move(thread, thread->GetPriority(), priority);
else
ready_queue.prepare(priority);
}
@@ -120,10 +120,10 @@ ResultCode ServerSession::HandleSyncRequest(SharedPtr<Thread> thread) {
result = hle_handler->HandleSyncRequest(context);
}

- if (thread->status == ThreadStatus::Running) {
+ if (thread->GetStatus() == ThreadStatus::Running) {
// Put the thread to sleep until the server replies, it will be awoken in
// svcReplyAndReceive for LLE servers.
- thread->status = ThreadStatus::WaitIPC;
+ thread->SetStatus(ThreadStatus::WaitIPC);

if (hle_handler != nullptr) {
// For HLE services, we put the request threads to sleep for a short duration to
@@ -156,7 +156,7 @@ static ResultCode GetThreadId(u32* thread_id, Handle thread_handle) {
return ERR_INVALID_HANDLE;
}

- *thread_id = thread->GetThreadId();
+ *thread_id = thread->GetThreadID();
return RESULT_SUCCESS;
}

@@ -177,7 +177,7 @@ static ResultCode GetProcessId(u32* process_id, Handle process_handle) {
/// Default thread wakeup callback for WaitSynchronization
static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
SharedPtr<WaitObject> object, std::size_t index) {
- ASSERT(thread->status == ThreadStatus::WaitSynchAny);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitSynchAny);

if (reason == ThreadWakeupReason::Timeout) {
thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
@@ -204,10 +204,10 @@ static ResultCode WaitSynchronization(Handle* index, VAddr handles_address, u64
if (handle_count > MaxHandles)
return ResultCode(ErrorModule::Kernel, ErrCodes::TooLarge);

- auto thread = GetCurrentThread();
+ auto* const thread = GetCurrentThread();

- using ObjectPtr = SharedPtr<WaitObject>;
- std::vector<ObjectPtr> objects(handle_count);
+ using ObjectPtr = Thread::ThreadWaitObjects::value_type;
+ Thread::ThreadWaitObjects objects(handle_count);
auto& kernel = Core::System::GetInstance().Kernel();

for (u64 i = 0; i < handle_count; ++i) {
@@ -244,14 +244,14 @@ static ResultCode WaitSynchronization(Handle* index, VAddr handles_address, u64
for (auto& object : objects)
object->AddWaitingThread(thread);

- thread->wait_objects = std::move(objects);
- thread->status = ThreadStatus::WaitSynchAny;
+ thread->SetWaitObjects(std::move(objects));
+ thread->SetStatus(ThreadStatus::WaitSynchAny);

// Create an event to wake the thread up after the specified nanosecond delay has passed
thread->WakeAfterDelay(nano_seconds);
- thread->wakeup_callback = DefaultThreadWakeupCallback;
+ thread->SetWakeupCallback(DefaultThreadWakeupCallback);

- Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+ Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();

return RESULT_TIMEOUT;
}
@@ -266,7 +266,7 @@ static ResultCode CancelSynchronization(Handle thread_handle) {
return ERR_INVALID_HANDLE;
}

- ASSERT(thread->status == ThreadStatus::WaitSynchAny);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitSynchAny);
thread->SetWaitSynchronizationResult(
ResultCode(ErrorModule::Kernel, ErrCodes::SynchronizationCanceled));
thread->ResumeFromWait();
@@ -425,7 +425,7 @@ static ResultCode GetThreadContext(VAddr thread_context, Handle handle) {
}

const auto current_process = Core::CurrentProcess();
- if (thread->owner_process != current_process) {
+ if (thread->GetOwnerProcess() != current_process) {
return ERR_INVALID_HANDLE;
}

@@ -433,7 +433,7 @@ static ResultCode GetThreadContext(VAddr thread_context, Handle handle) {
return ERR_ALREADY_REGISTERED;
}

- Core::ARM_Interface::ThreadContext ctx = thread->context;
+ Core::ARM_Interface::ThreadContext ctx = thread->GetContext();
// Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
ctx.pstate &= 0xFF0FFE20;

@@ -479,14 +479,14 @@ static ResultCode SetThreadPriority(Handle handle, u32 priority) {

thread->SetPriority(priority);

- Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+ Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
return RESULT_SUCCESS;
}

/// Get which CPU core is executing the current thread
static u32 GetCurrentProcessorNumber() {
LOG_TRACE(Kernel_SVC, "called");
- return GetCurrentThread()->processor_id;
+ return GetCurrentThread()->GetProcessorID();
}

static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size,
@@ -622,10 +622,14 @@ static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, V
CASCADE_RESULT(SharedPtr<Thread> thread,
Thread::Create(kernel, name, entry_point, priority, arg, processor_id, stack_top,
Core::CurrentProcess()));
- CASCADE_RESULT(thread->guest_handle, kernel.HandleTable().Create(thread));
- *out_handle = thread->guest_handle;
+ const auto new_guest_handle = kernel.HandleTable().Create(thread);
+ if (new_guest_handle.Failed()) {
+ return new_guest_handle.Code();
+ }
+ thread->SetGuestHandle(*new_guest_handle);
+ *out_handle = *new_guest_handle;

- Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+ Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();

LOG_TRACE(Kernel_SVC,
"called entrypoint=0x{:08X} ({}), arg=0x{:08X}, stacktop=0x{:08X}, "
@@ -645,10 +649,10 @@ static ResultCode StartThread(Handle thread_handle) {
return ERR_INVALID_HANDLE;
}

- ASSERT(thread->status == ThreadStatus::Dormant);
+ ASSERT(thread->GetStatus() == ThreadStatus::Dormant);

thread->ResumeFromWait();
- Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+ Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();

return RESULT_SUCCESS;
}
@@ -694,17 +698,17 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var
CASCADE_CODE(Mutex::Release(mutex_addr));

SharedPtr<Thread> current_thread = GetCurrentThread();
- current_thread->condvar_wait_address = condition_variable_addr;
- current_thread->mutex_wait_address = mutex_addr;
- current_thread->wait_handle = thread_handle;
- current_thread->status = ThreadStatus::WaitMutex;
- current_thread->wakeup_callback = nullptr;
+ current_thread->SetCondVarWaitAddress(condition_variable_addr);
+ current_thread->SetMutexWaitAddress(mutex_addr);
+ current_thread->SetWaitHandle(thread_handle);
+ current_thread->SetStatus(ThreadStatus::WaitMutex);
+ current_thread->InvalidateWakeupCallback();

current_thread->WakeAfterDelay(nano_seconds);

// Note: Deliberately don't attempt to inherit the lock owner's priority.

- Core::System::GetInstance().CpuCore(current_thread->processor_id).PrepareReschedule();
+ Core::System::GetInstance().CpuCore(current_thread->GetProcessorID()).PrepareReschedule();
return RESULT_SUCCESS;
}

@@ -713,14 +717,14 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
condition_variable_addr, target);

- auto RetrieveWaitingThreads = [](std::size_t core_index,
- std::vector<SharedPtr<Thread>>& waiting_threads,
- VAddr condvar_addr) {
+ const auto RetrieveWaitingThreads = [](std::size_t core_index,
+ std::vector<SharedPtr<Thread>>& waiting_threads,
+ VAddr condvar_addr) {
const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
- auto& thread_list = scheduler->GetThreadList();
+ const auto& thread_list = scheduler->GetThreadList();

- for (auto& thread : thread_list) {
- if (thread->condvar_wait_address == condvar_addr)
+ for (const auto& thread : thread_list) {
+ if (thread->GetCondVarWaitAddress() == condvar_addr)
waiting_threads.push_back(thread);
}
};
@@ -734,7 +738,7 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
// Sort them by priority, such that the highest priority ones come first.
std::sort(waiting_threads.begin(), waiting_threads.end(),
[](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) {
- return lhs->current_priority < rhs->current_priority;
+ return lhs->GetPriority() < rhs->GetPriority();
});

// Only process up to 'target' threads, unless 'target' is -1, in which case process
@@ -750,7 +754,7 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
for (std::size_t index = 0; index < last; ++index) {
auto& thread = waiting_threads[index];

- ASSERT(thread->condvar_wait_address == condition_variable_addr);
+ ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);

std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
@@ -759,42 +763,43 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
// Atomically read the value of the mutex.
u32 mutex_val = 0;
do {
- monitor.SetExclusive(current_core, thread->mutex_wait_address);
+ monitor.SetExclusive(current_core, thread->GetMutexWaitAddress());

// If the mutex is not yet acquired, acquire it.
- mutex_val = Memory::Read32(thread->mutex_wait_address);
+ mutex_val = Memory::Read32(thread->GetMutexWaitAddress());

if (mutex_val != 0) {
monitor.ClearExclusive();
break;
}
- } while (!monitor.ExclusiveWrite32(current_core, thread->mutex_wait_address,
- thread->wait_handle));
+ } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
+ thread->GetWaitHandle()));

if (mutex_val == 0) {
// We were able to acquire the mutex, resume this thread.
- ASSERT(thread->status == ThreadStatus::WaitMutex);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
thread->ResumeFromWait();

- auto lock_owner = thread->lock_owner;
- if (lock_owner)
+ auto* const lock_owner = thread->GetLockOwner();
+ if (lock_owner != nullptr) {
lock_owner->RemoveMutexWaiter(thread);
+ }

- thread->lock_owner = nullptr;
- thread->mutex_wait_address = 0;
- thread->condvar_wait_address = 0;
- thread->wait_handle = 0;
+ thread->SetLockOwner(nullptr);
+ thread->SetMutexWaitAddress(0);
+ thread->SetCondVarWaitAddress(0);
+ thread->SetWaitHandle(0);
} else {
// Atomically signal that the mutex now has a waiting thread.
do {
- monitor.SetExclusive(current_core, thread->mutex_wait_address);
+ monitor.SetExclusive(current_core, thread->GetMutexWaitAddress());

// Ensure that the mutex value is still what we expect.
- u32 value = Memory::Read32(thread->mutex_wait_address);
+ u32 value = Memory::Read32(thread->GetMutexWaitAddress());
// TODO(Subv): When this happens, the kernel just clears the exclusive state and
// retries the initial read for this thread.
ASSERT_MSG(mutex_val == value, "Unhandled synchronization primitive case");
- } while (!monitor.ExclusiveWrite32(current_core, thread->mutex_wait_address,
+ } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
mutex_val | Mutex::MutexHasWaitersFlag));

// The mutex is already owned by some other thread, make this thread wait on it.
@@ -802,12 +807,12 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
auto owner = kernel.HandleTable().Get<Thread>(owner_handle);
ASSERT(owner);
- ASSERT(thread->status == ThreadStatus::WaitMutex);
- thread->wakeup_callback = nullptr;
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+ thread->InvalidateWakeupCallback();

owner->AddMutexWaiter(thread);

- Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+ Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
}
}

@@ -913,8 +918,8 @@ static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask)
return ERR_INVALID_HANDLE;
}

- *core = thread->ideal_core;
- *mask = thread->affinity_mask;
+ *core = thread->GetIdealCore();
+ *mask = thread->GetAffinityMask();

return RESULT_SUCCESS;
}
@@ -930,11 +935,13 @@ static ResultCode SetThreadCoreMask(Handle thread_handle, u32 core, u64 mask) {
}

if (core == static_cast<u32>(THREADPROCESSORID_DEFAULT)) {
- ASSERT(thread->owner_process->GetDefaultProcessorID() !=
- static_cast<u8>(THREADPROCESSORID_DEFAULT));
+ const u8 default_processor_id = thread->GetOwnerProcess()->GetDefaultProcessorID();
+
+ ASSERT(default_processor_id != static_cast<u8>(THREADPROCESSORID_DEFAULT));
+
// Set the target CPU to the one specified in the process' exheader.
- core = thread->owner_process->GetDefaultProcessorID();
- mask = 1ull << core;
+ core = default_processor_id;
+ mask = 1ULL << core;
}

if (mask == 0) {
@@ -945,7 +952,7 @@ static ResultCode SetThreadCoreMask(Handle thread_handle, u32 core, u64 mask) {
static constexpr u32 OnlyChangeMask = static_cast<u32>(-3);

if (core == OnlyChangeMask) {
- core = thread->ideal_core;
+ core = thread->GetIdealCore();
} else if (core >= Core::NUM_CPU_CORES && core != static_cast<u32>(-1)) {
return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidProcessorId);
}
@@ -70,7 +70,7 @@ void Thread::Stop() {

void WaitCurrentThread_Sleep() {
Thread* thread = GetCurrentThread();
- thread->status = ThreadStatus::WaitSleep;
+ thread->SetStatus(ThreadStatus::WaitSleep);
}

void ExitCurrentThread() {
@@ -269,9 +269,9 @@ SharedPtr<Thread> SetupMainThread(KernelCore& kernel, VAddr entry_point, u32 pri
SharedPtr<Thread> thread = std::move(thread_res).Unwrap();

// Register 1 must be a handle to the main thread
- thread->guest_handle = kernel.HandleTable().Create(thread).Unwrap();
-
- thread->context.cpu_registers[1] = thread->guest_handle;
+ const Handle guest_handle = kernel.HandleTable().Create(thread).Unwrap();
+ thread->SetGuestHandle(guest_handle);
+ thread->GetContext().cpu_registers[1] = guest_handle;

// Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
thread->ResumeFromWait();
@@ -299,6 +299,18 @@ VAddr Thread::GetCommandBufferAddress() const {
return GetTLSAddress() + CommandHeaderOffset;
}

+ void Thread::SetStatus(ThreadStatus new_status) {
+ if (new_status == status) {
+ return;
+ }
+
+ if (status == ThreadStatus::Running) {
+ last_running_ticks = CoreTiming::GetTicks();
+ }
+
+ status = new_status;
+ }
+
void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
if (thread->lock_owner == this) {
// If the thread is already waiting for this thread to release the mutex, ensure that the
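The new Thread::SetStatus above also takes over the last_running_ticks bookkeeping that Scheduler::SwitchContext previously did by hand (see the scheduler hunk earlier, where the manual CoreTiming::GetTicks() assignment is dropped). A minimal sketch of what a status transition looks like for a caller after this change (names taken from the diff; the surrounding code is hypothetical):

    // Before this commit, callers had to remember the bookkeeping themselves:
    //     previous_thread->last_running_ticks = CoreTiming::GetTicks();
    //     previous_thread->status = ThreadStatus::Ready;
    // After it, the setter records the tick count internally whenever the
    // thread leaves the Running state:
    previous_thread->SetStatus(ThreadStatus::Ready);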
@@ -393,6 +405,18 @@ void Thread::ChangeCore(u32 core, u64 mask) {
Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule();
}

+ bool Thread::AllWaitObjectsReady() {
+ return std::none_of(
+ wait_objects.begin(), wait_objects.end(),
+ [this](const SharedPtr<WaitObject>& object) { return object->ShouldWait(this); });
+ }
+
+ bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
+ SharedPtr<WaitObject> object, std::size_t index) {
+ ASSERT(wakeup_callback);
+ return wakeup_callback(reason, std::move(thread), std::move(object), index);
+ }
+
////////////////////////////////////////////////////////////////////////////////////////////////////

/**
@@ -65,6 +65,15 @@ public:
using TLSMemory = std::vector<u8>;
using TLSMemoryPtr = std::shared_ptr<TLSMemory>;

+ using MutexWaitingThreads = std::vector<SharedPtr<Thread>>;
+
+ using ThreadContext = Core::ARM_Interface::ThreadContext;
+
+ using ThreadWaitObjects = std::vector<SharedPtr<WaitObject>>;
+
+ using WakeupCallback = std::function<bool(ThreadWakeupReason reason, SharedPtr<Thread> thread,
+ SharedPtr<WaitObject> object, std::size_t index)>;
+
/**
* Creates and returns a new thread. The new thread is immediately scheduled
* @param kernel The kernel instance this thread will be created under.
@@ -105,6 +114,14 @@ public:
return current_priority;
}

+ /**
+ * Gets the thread's nominal priority.
+ * @return The current thread's nominal priority.
+ */
+ u32 GetNominalPriority() const {
+ return nominal_priority;
+ }
+
/**
* Sets the thread's current priority
* @param priority The new priority
@@ -133,7 +150,7 @@ public:
* Gets the thread's thread ID
* @return The thread's ID
*/
- u32 GetThreadId() const {
+ u32 GetThreadID() const {
return thread_id;
}

@@ -203,6 +220,11 @@ public:
return tpidr_el0;
}

+ /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
+ void SetTPIDR_EL0(u64 value) {
+ tpidr_el0 = value;
+ }
+
/*
* Returns the address of the current thread's command buffer, located in the TLS.
* @returns VAddr of the thread's command buffer.
@@ -218,69 +240,193 @@ public:
return status == ThreadStatus::WaitSynchAll;
}

- Core::ARM_Interface::ThreadContext context;
+ ThreadContext& GetContext() {
+ return context;
+ }
+
- u32 thread_id;
+ const ThreadContext& GetContext() const {
+ return context;
+ }
+
- ThreadStatus status;
- VAddr entry_point;
- VAddr stack_top;
+ ThreadStatus GetStatus() const {
+ return status;
+ }
+
- u32 nominal_priority; ///< Nominal thread priority, as set by the emulated application
- u32 current_priority; ///< Current thread priority, can be temporarily changed
+ void SetStatus(ThreadStatus new_status);
+
- u64 last_running_ticks; ///< CPU tick when thread was last running
+ u64 GetLastRunningTicks() const {
+ return last_running_ticks;
+ }
+
- s32 processor_id;
+ s32 GetProcessorID() const {
+ return processor_id;
+ }
+
- VAddr tls_address; ///< Virtual address of the Thread Local Storage of the thread
- u64 tpidr_el0; ///< TPIDR_EL0 read/write system register.
+ SharedPtr<Process>& GetOwnerProcess() {
+ return owner_process;
+ }
+
- SharedPtr<Process> owner_process; ///< Process that owns this thread
+ const SharedPtr<Process>& GetOwnerProcess() const {
+ return owner_process;
+ }
+
+ const ThreadWaitObjects& GetWaitObjects() const {
+ return wait_objects;
+ }
+
+ void SetWaitObjects(ThreadWaitObjects objects) {
+ wait_objects = std::move(objects);
+ }
+
+ void ClearWaitObjects() {
+ wait_objects.clear();
+ }
+
+ /// Determines whether all the objects this thread is waiting on are ready.
+ bool AllWaitObjectsReady();
+
+ const MutexWaitingThreads& GetMutexWaitingThreads() const {
+ return wait_mutex_threads;
+ }
+
+ Thread* GetLockOwner() const {
+ return lock_owner.get();
+ }
+
+ void SetLockOwner(SharedPtr<Thread> owner) {
+ lock_owner = std::move(owner);
+ }
+
+ VAddr GetCondVarWaitAddress() const {
+ return condvar_wait_address;
+ }
+
+ void SetCondVarWaitAddress(VAddr address) {
+ condvar_wait_address = address;
+ }
+
+ VAddr GetMutexWaitAddress() const {
+ return mutex_wait_address;
+ }
+
+ void SetMutexWaitAddress(VAddr address) {
+ mutex_wait_address = address;
+ }
+
+ Handle GetWaitHandle() const {
+ return wait_handle;
+ }
+
+ void SetWaitHandle(Handle handle) {
+ wait_handle = handle;
+ }
+
+ VAddr GetArbiterWaitAddress() const {
+ return arb_wait_address;
+ }
+
+ void SetArbiterWaitAddress(VAddr address) {
+ arb_wait_address = address;
+ }
+
+ void SetGuestHandle(Handle handle) {
+ guest_handle = handle;
+ }
+
+ bool HasWakeupCallback() const {
+ return wakeup_callback != nullptr;
+ }
+
+ void SetWakeupCallback(WakeupCallback callback) {
+ wakeup_callback = std::move(callback);
+ }
+
+ void InvalidateWakeupCallback() {
+ SetWakeupCallback(nullptr);
+ }
+
+ /**
+ * Invokes the thread's wakeup callback.
+ *
+ * @pre A valid wakeup callback has been set. Violating this precondition
+ * will cause an assertion to trigger.
+ */
+ bool InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
+ SharedPtr<WaitObject> object, std::size_t index);
+
+ u32 GetIdealCore() const {
+ return ideal_core;
+ }
+
+ u64 GetAffinityMask() const {
+ return affinity_mask;
+ }
+
+ private:
+ explicit Thread(KernelCore& kernel);
+ ~Thread() override;
+
+ Core::ARM_Interface::ThreadContext context{};
+
+ u32 thread_id = 0;
+
+ ThreadStatus status = ThreadStatus::Dormant;
+
+ VAddr entry_point = 0;
+ VAddr stack_top = 0;
+
+ u32 nominal_priority = 0; ///< Nominal thread priority, as set by the emulated application
+ u32 current_priority = 0; ///< Current thread priority, can be temporarily changed
+
+ u64 last_running_ticks = 0; ///< CPU tick when thread was last running
+
+ s32 processor_id = 0;
+
+ VAddr tls_address = 0; ///< Virtual address of the Thread Local Storage of the thread
+ u64 tpidr_el0 = 0; ///< TPIDR_EL0 read/write system register.
+
+ /// Process that owns this thread
+ SharedPtr<Process> owner_process;
+
/// Objects that the thread is waiting on, in the same order as they were
- // passed to WaitSynchronization1/N.
- std::vector<SharedPtr<WaitObject>> wait_objects;
+ /// passed to WaitSynchronization1/N.
+ ThreadWaitObjects wait_objects;

/// List of threads that are waiting for a mutex that is held by this thread.
- std::vector<SharedPtr<Thread>> wait_mutex_threads;
+ MutexWaitingThreads wait_mutex_threads;

/// Thread that owns the lock that this thread is waiting for.
SharedPtr<Thread> lock_owner;

- // If waiting on a ConditionVariable, this is the ConditionVariable address
- VAddr condvar_wait_address;
- VAddr mutex_wait_address; ///< If waiting on a Mutex, this is the mutex address
- Handle wait_handle; ///< The handle used to wait for the mutex.
+ /// If waiting on a ConditionVariable, this is the ConditionVariable address
+ VAddr condvar_wait_address = 0;
+ /// If waiting on a Mutex, this is the mutex address
+ VAddr mutex_wait_address = 0;
+ /// The handle used to wait for the mutex.
+ Handle wait_handle = 0;

- // If waiting for an AddressArbiter, this is the address being waited on.
+ /// If waiting for an AddressArbiter, this is the address being waited on.
VAddr arb_wait_address{0};

- std::string name;
-
/// Handle used by guest emulated application to access this thread
- Handle guest_handle;
+ Handle guest_handle = 0;

/// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
- Handle callback_handle;
+ Handle callback_handle = 0;

- using WakeupCallback = bool(ThreadWakeupReason reason, SharedPtr<Thread> thread,
- SharedPtr<WaitObject> object, std::size_t index);
- // Callback that will be invoked when the thread is resumed from a waiting state. If the thread
- // was waiting via WaitSynchronizationN then the object will be the last object that became
- // available. In case of a timeout, the object will be nullptr.
- std::function<WakeupCallback> wakeup_callback;
+ /// Callback that will be invoked when the thread is resumed from a waiting state. If the thread
+ /// was waiting via WaitSynchronizationN then the object will be the last object that became
+ /// available. In case of a timeout, the object will be nullptr.
+ WakeupCallback wakeup_callback;

std::shared_ptr<Scheduler> scheduler;

u32 ideal_core{0xFFFFFFFF};
u64 affinity_mask{0x1};

- private:
- explicit Thread(KernelCore& kernel);
- ~Thread() override;

TLSMemoryPtr tls_memory = std::make_shared<TLSMemory>();

+ std::string name;
};

/**
@@ -35,13 +35,15 @@ SharedPtr<Thread> WaitObject::GetHighestPriorityReadyThread() {
u32 candidate_priority = THREADPRIO_LOWEST + 1;

for (const auto& thread : waiting_threads) {
+ const ThreadStatus thread_status = thread->GetStatus();
+
// The list of waiting threads must not contain threads that are not waiting to be awakened.
- ASSERT_MSG(thread->status == ThreadStatus::WaitSynchAny ||
- thread->status == ThreadStatus::WaitSynchAll ||
- thread->status == ThreadStatus::WaitHLEEvent,
+ ASSERT_MSG(thread_status == ThreadStatus::WaitSynchAny ||
+ thread_status == ThreadStatus::WaitSynchAll ||
+ thread_status == ThreadStatus::WaitHLEEvent,
"Inconsistent thread statuses in waiting_threads");

- if (thread->current_priority >= candidate_priority)
+ if (thread->GetPriority() >= candidate_priority)
continue;

if (ShouldWait(thread.get()))
@@ -50,16 +52,13 @@ SharedPtr<Thread> WaitObject::GetHighestPriorityReadyThread() {
// A thread is ready to run if it's either in ThreadStatus::WaitSynchAny or
// in ThreadStatus::WaitSynchAll and the rest of the objects it is waiting on are ready.
bool ready_to_run = true;
- if (thread->status == ThreadStatus::WaitSynchAll) {
- ready_to_run = std::none_of(thread->wait_objects.begin(), thread->wait_objects.end(),
- [&thread](const SharedPtr<WaitObject>& object) {
- return object->ShouldWait(thread.get());
- });
+ if (thread_status == ThreadStatus::WaitSynchAll) {
+ ready_to_run = thread->AllWaitObjectsReady();
}

if (ready_to_run) {
candidate = thread.get();
- candidate_priority = thread->current_priority;
+ candidate_priority = thread->GetPriority();
}
}

@@ -75,24 +74,24 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) {
if (!thread->IsSleepingOnWaitAll()) {
Acquire(thread.get());
} else {
- for (auto& object : thread->wait_objects) {
+ for (const auto& object : thread->GetWaitObjects()) {
ASSERT(!object->ShouldWait(thread.get()));
object->Acquire(thread.get());
}
}

- std::size_t index = thread->GetWaitObjectIndex(this);
+ const std::size_t index = thread->GetWaitObjectIndex(this);

- for (auto& object : thread->wait_objects)
+ for (const auto& object : thread->GetWaitObjects())
object->RemoveWaitingThread(thread.get());
- thread->wait_objects.clear();
+ thread->ClearWaitObjects();

thread->CancelWakeupTimer();

bool resume = true;

- if (thread->wakeup_callback)
- resume = thread->wakeup_callback(ThreadWakeupReason::Signal, thread, this, index);
+ if (thread->HasWakeupCallback())
+ resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Signal, thread, this, index);

if (resume)
thread->ResumeFromWait();
@@ -119,7 +119,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() cons
std::vector<std::unique_ptr<WaitTreeItem>> list;

constexpr std::size_t BaseRegister = 29;
- u64 base_pointer = thread.context.cpu_registers[BaseRegister];
+ u64 base_pointer = thread.GetContext().cpu_registers[BaseRegister];

while (base_pointer != 0) {
u64 lr = Memory::Read64(base_pointer + sizeof(u64));
@@ -213,7 +213,7 @@ WaitTreeThread::~WaitTreeThread() = default;
QString WaitTreeThread::GetText() const {
const auto& thread = static_cast<const Kernel::Thread&>(object);
QString status;
- switch (thread.status) {
+ switch (thread.GetStatus()) {
case Kernel::ThreadStatus::Running:
status = tr("running");
break;
@@ -246,15 +246,17 @@ QString WaitTreeThread::GetText() const {
status = tr("dead");
break;
}
- QString pc_info = tr(" PC = 0x%1 LR = 0x%2")
- .arg(thread.context.pc, 8, 16, QLatin1Char('0'))
- .arg(thread.context.cpu_registers[30], 8, 16, QLatin1Char('0'));
+
+ const auto& context = thread.GetContext();
+ const QString pc_info = tr(" PC = 0x%1 LR = 0x%2")
+ .arg(context.pc, 8, 16, QLatin1Char('0'))
+ .arg(context.cpu_registers[30], 8, 16, QLatin1Char('0'));
return WaitTreeWaitObject::GetText() + pc_info + " (" + status + ") ";
}

QColor WaitTreeThread::GetColor() const {
const auto& thread = static_cast<const Kernel::Thread&>(object);
- switch (thread.status) {
+ switch (thread.GetStatus()) {
case Kernel::ThreadStatus::Running:
return QColor(Qt::GlobalColor::darkGreen);
case Kernel::ThreadStatus::Ready:
@@ -284,7 +286,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
const auto& thread = static_cast<const Kernel::Thread&>(object);

QString processor;
- switch (thread.processor_id) {
+ switch (thread.GetProcessorID()) {
case Kernel::ThreadProcessorId::THREADPROCESSORID_DEFAULT:
processor = tr("default");
break;
@@ -292,32 +294,35 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
case Kernel::ThreadProcessorId::THREADPROCESSORID_1:
case Kernel::ThreadProcessorId::THREADPROCESSORID_2:
case Kernel::ThreadProcessorId::THREADPROCESSORID_3:
- processor = tr("core %1").arg(thread.processor_id);
+ processor = tr("core %1").arg(thread.GetProcessorID());
break;
default:
- processor = tr("Unknown processor %1").arg(thread.processor_id);
+ processor = tr("Unknown processor %1").arg(thread.GetProcessorID());
break;
}

list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
- list.push_back(std::make_unique<WaitTreeText>(tr("ideal core = %1").arg(thread.ideal_core)));
list.push_back(
- std::make_unique<WaitTreeText>(tr("affinity mask = %1").arg(thread.affinity_mask)));
- list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadId())));
+ std::make_unique<WaitTreeText>(tr("ideal core = %1").arg(thread.GetIdealCore())));
+ list.push_back(
+ std::make_unique<WaitTreeText>(tr("affinity mask = %1").arg(thread.GetAffinityMask())));
+ list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID())));
list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
- .arg(thread.current_priority)
- .arg(thread.nominal_priority)));
+ .arg(thread.GetPriority())
+ .arg(thread.GetNominalPriority())));
list.push_back(std::make_unique<WaitTreeText>(
- tr("last running ticks = %1").arg(thread.last_running_ticks)));
+ tr("last running ticks = %1").arg(thread.GetLastRunningTicks())));

- if (thread.mutex_wait_address != 0)
- list.push_back(std::make_unique<WaitTreeMutexInfo>(thread.mutex_wait_address));
- else
+ const VAddr mutex_wait_address = thread.GetMutexWaitAddress();
+ if (mutex_wait_address != 0) {
+ list.push_back(std::make_unique<WaitTreeMutexInfo>(mutex_wait_address));
+ } else {
list.push_back(std::make_unique<WaitTreeText>(tr("not waiting for mutex")));
+ }

- if (thread.status == Kernel::ThreadStatus::WaitSynchAny ||
- thread.status == Kernel::ThreadStatus::WaitSynchAll) {
- list.push_back(std::make_unique<WaitTreeObjectList>(thread.wait_objects,
+ if (thread.GetStatus() == Kernel::ThreadStatus::WaitSynchAny ||
+ thread.GetStatus() == Kernel::ThreadStatus::WaitSynchAll) {
+ list.push_back(std::make_unique<WaitTreeObjectList>(thread.GetWaitObjects(),
thread.IsSleepingOnWaitAll()));
}