nvflinger: Fix for BufferQueue event handling.
Commit 170e19d4ea (parent: f00ca69a81)
@@ -29,7 +29,8 @@ void SessionRequestHandler::ClientDisconnected(SharedPtr<ServerSession> server_s
 
 SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
                                                       const std::string& reason, u64 timeout,
-                                                      WakeupCallback&& callback) {
+                                                      WakeupCallback&& callback,
+                                                      Kernel::SharedPtr<Kernel::Event> event) {
     // Put the client thread to sleep until the wait event is signaled or the timeout expires.
     thread->wakeup_callback =
@@ -41,7 +42,12 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
         return true;
     };
 
-    auto event = Kernel::Event::Create(Kernel::ResetType::OneShot, "HLE Pause Event: " + reason);
+    if (!event) {
+        // Create event if not provided
+        event = Kernel::Event::Create(Kernel::ResetType::OneShot, "HLE Pause Event: " + reason);
+    }
+
+    event->Clear();
     thread->status = THREADSTATUS_WAIT_HLE_EVENT;
     thread->wait_objects = {event};
     event->AddWaitingThread(thread);
@@ -118,10 +118,12 @@ public:
      * @param callback Callback to be invoked when the thread is resumed. This callback must write
      * the entire command response once again, regardless of the state of it before this function
      * was called.
+     * @param event Event to use to wake up the thread. If unspecified, an event will be created.
      * @returns Event that when signaled will resume the thread and call the callback function.
      */
     SharedPtr<Event> SleepClientThread(SharedPtr<Thread> thread, const std::string& reason,
-                                       u64 timeout, WakeupCallback&& callback);
+                                       u64 timeout, WakeupCallback&& callback,
+                                       Kernel::SharedPtr<Kernel::Event> event = nullptr);
 
     void ParseCommandBuffer(u32_le* src_cmdbuf, bool incoming);
 
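Note on the change above: SleepClientThread now takes an optional Kernel::Event parameter (defaulting to nullptr) so a caller can supply its own wake-up event; a fresh event is created only when none is given, and it is cleared before the thread goes to sleep. A minimal sketch of this default-argument pattern, with hypothetical simplified types rather than yuzu's actual classes:

#include <memory>
#include <string>

// Stand-in for Kernel::Event; illustrative only.
struct Event {
    std::string name;
    bool signaled = false;
    void Clear() { signaled = false; }
};

// If 'event' is null, create one; otherwise reuse the caller-provided event.
std::shared_ptr<Event> SleepClientThread(const std::string& reason,
                                         std::shared_ptr<Event> event = nullptr) {
    if (!event) {
        event = std::make_shared<Event>();
        event->name = "HLE Pause Event: " + reason;
    }
    event->Clear(); // make sure an earlier signal does not wake the thread immediately
    // ... park the thread until 'event' is signaled or the timeout expires ...
    return event;
}

int main() {
    auto created = SleepClientThread("Example");         // no event supplied: one is created
    auto reused = SleepClientThread("Example", created); // caller-provided event is reused
    return reused == created ? 0 : 1;
}
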
@@ -13,8 +13,8 @@ namespace Service {
 namespace NVFlinger {
 
 BufferQueue::BufferQueue(u32 id, u64 layer_id) : id(id), layer_id(layer_id) {
-    native_handle = Kernel::Event::Create(Kernel::ResetType::OneShot, "BufferQueue NativeHandle");
-    native_handle->Signal();
+    buffer_wait_event =
+        Kernel::Event::Create(Kernel::ResetType::Sticky, "BufferQueue NativeHandle");
 }
 
 void BufferQueue::SetPreallocatedBuffer(u32 slot, IGBPBuffer& igbp_buffer) {
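The constructor above also switches the event from ResetType::OneShot to ResetType::Sticky: a sticky (manual-reset) event stays signaled until it is explicitly cleared, so a waiter that only starts waiting after the signal still wakes up, whereas a one-shot event is consumed by the first wait. A toy model of that distinction, using a plain flag instead of yuzu's Kernel::Event:

#include <cassert>

// Illustrative only: models the ResetType semantics, not the real kernel object.
enum class ResetType { OneShot, Sticky };

class SimpleEvent {
public:
    explicit SimpleEvent(ResetType type) : type(type) {}
    void Signal() { signaled = true; }
    void Clear() { signaled = false; }
    // Returns true if signaled; a OneShot event auto-clears on a successful wait.
    bool TryWait() {
        if (!signaled)
            return false;
        if (type == ResetType::OneShot)
            signaled = false;
        return true;
    }

private:
    ResetType type;
    bool signaled = false;
};

int main() {
    SimpleEvent sticky{ResetType::Sticky};
    sticky.Signal();
    assert(sticky.TryWait()); // first waiter sees the signal
    assert(sticky.TryWait()); // still signaled for later waiters until Clear()

    SimpleEvent one_shot{ResetType::OneShot};
    one_shot.Signal();
    assert(one_shot.TryWait());  // consumed by the first wait
    assert(!one_shot.TryWait()); // gone afterwards
}
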
@@ -26,10 +26,7 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, IGBPBuffer& igbp_buffer) {
     LOG_WARNING(Service, "Adding graphics buffer {}", slot);
 
     queue.emplace_back(buffer);
-
-    if (buffer_wait_event) {
-        buffer_wait_event->Signal();
-    }
+    buffer_wait_event->Signal();
 }
 
 boost::optional<u32> BufferQueue::DequeueBuffer(u32 width, u32 height) {
@@ -48,8 +45,6 @@ boost::optional<u32> BufferQueue::DequeueBuffer(u32 width, u32 height) {
         return boost::none;
     }
 
-    buffer_wait_event = nullptr;
-
     itr->status = Buffer::Status::Dequeued;
     return itr->slot;
 }
@@ -88,9 +83,7 @@ void BufferQueue::ReleaseBuffer(u32 slot) {
     ASSERT(itr->status == Buffer::Status::Acquired);
     itr->status = Buffer::Status::Free;
 
-    if (buffer_wait_event) {
-        buffer_wait_event->Signal();
-    }
+    buffer_wait_event->Signal();
 }
 
 u32 BufferQueue::Query(QueryType type) {
@@ -106,10 +99,5 @@ u32 BufferQueue::Query(QueryType type) {
     return 0;
 }
 
-void BufferQueue::SetBufferWaitEvent(Kernel::SharedPtr<Kernel::Event>&& wait_event) {
-    ASSERT_MSG(!buffer_wait_event, "buffer_wait_event only supports a single waiting thread!");
-    buffer_wait_event = std::move(wait_event);
-}
-
 } // namespace NVFlinger
 } // namespace Service
 
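Taken together, the BufferQueue changes make buffer_wait_event a single event owned by the queue for its whole lifetime: it is signaled whenever a buffer is added or released, DequeueBuffer no longer resets it to nullptr, and the SetBufferWaitEvent plumbing is removed. A condensed, self-contained sketch of that producer/consumer flow, with simplified stand-in types (not the actual yuzu classes):

#include <memory>
#include <optional>
#include <vector>

// Stand-ins for Kernel::Event and the buffer slots; illustrative only.
struct Event {
    bool signaled = false;
    void Signal() { signaled = true; }
};

struct Buffer {
    unsigned slot;
    bool is_free = true;
};

class BufferQueueSketch {
public:
    BufferQueueSketch() : wait_event(std::make_shared<Event>()) {}

    // Producer side: a buffer became available again, wake any waiter.
    void ReleaseBuffer(unsigned slot) {
        for (auto& b : queue)
            if (b.slot == slot)
                b.is_free = true;
        wait_event->Signal();
    }

    // Consumer side: take a free buffer, or report none so the caller can sleep on the event.
    std::optional<unsigned> DequeueBuffer() {
        for (auto& b : queue)
            if (b.is_free) {
                b.is_free = false;
                return b.slot;
            }
        return std::nullopt;
    }

    std::shared_ptr<Event> GetBufferWaitEvent() const { return wait_event; }

    std::vector<Buffer> queue;

private:
    std::shared_ptr<Event> wait_event;
};

int main() {
    BufferQueueSketch q;
    q.queue.push_back({0, false}); // slot 0 exists but is currently in use
    auto slot = q.DequeueBuffer(); // no free buffer: the caller would sleep on the wait event
    q.ReleaseBuffer(0);            // producer frees slot 0 and signals the event
    slot = q.DequeueBuffer();      // now succeeds
    return slot && q.GetBufferWaitEvent()->signaled ? 0 : 1;
}
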
@@ -77,14 +77,13 @@ public:
     boost::optional<const Buffer&> AcquireBuffer();
     void ReleaseBuffer(u32 slot);
     u32 Query(QueryType type);
-    void SetBufferWaitEvent(Kernel::SharedPtr<Kernel::Event>&& wait_event);
 
     u32 GetId() const {
         return id;
     }
 
-    Kernel::SharedPtr<Kernel::Event> GetNativeHandle() const {
-        return native_handle;
+    Kernel::SharedPtr<Kernel::Event> GetBufferWaitEvent() const {
+        return buffer_wait_event;
     }
 
 private:
@@ -92,9 +91,6 @@ private:
     u64 layer_id;
 
     std::vector<Buffer> queue;
-    Kernel::SharedPtr<Kernel::Event> native_handle;
-
-    /// Used to signal waiting thread when no buffers are available
     Kernel::SharedPtr<Kernel::Event> buffer_wait_event;
 };
 
@@ -152,9 +152,6 @@ void NVFlinger::Compose() {
             igbp_buffer.width, igbp_buffer.height, igbp_buffer.stride, buffer->transform);
 
         buffer_queue->ReleaseBuffer(buffer->slot);
-
-        // TODO(Subv): Figure out when we should actually signal this event.
-        buffer_queue->GetNativeHandle()->Signal();
     }
 }
 
@@ -495,7 +495,7 @@ private:
                 ctx.WriteBuffer(response.Serialize());
             } else {
                 // Wait the current thread until a buffer becomes available
-                auto wait_event = ctx.SleepClientThread(
+                ctx.SleepClientThread(
                     Kernel::GetCurrentThread(), "IHOSBinderDriver::DequeueBuffer", -1,
                     [=](Kernel::SharedPtr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
                         ThreadWakeupReason reason) {
@@ -506,8 +506,8 @@ private:
                         ctx.WriteBuffer(response.Serialize());
                         IPC::ResponseBuilder rb{ctx, 2};
                         rb.Push(RESULT_SUCCESS);
-                    });
-                buffer_queue->SetBufferWaitEvent(std::move(wait_event));
+                    },
+                    buffer_queue->GetBufferWaitEvent());
             }
         } else if (transaction == TransactionId::RequestBuffer) {
             IGBPRequestBufferRequestParcel request{ctx.ReadBuffer()};
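With the queue owning its wait event, the DequeueBuffer handler above no longer stores the event returned by SleepClientThread and pushes it into the queue; instead it passes buffer_queue->GetBufferWaitEvent() into the sleep call, so the signals already emitted by ReleaseBuffer and SetPreallocatedBuffer wake the sleeping client. A minimal self-contained model of that inversion, with hypothetical names rather than yuzu's API:

#include <functional>
#include <memory>
#include <vector>

// Toy event that runs queued wake-up callbacks when signaled; illustrative only.
struct Event {
    std::vector<std::function<void()>> waiters;
    void Signal() {
        for (auto& wake : waiters)
            wake();
        waiters.clear();
    }
};

// The sleeping side parks its retry logic on an event supplied by the caller,
// instead of creating a new event that the caller must then wire into the queue.
void SleepUntil(const std::shared_ptr<Event>& event, std::function<void()> on_wakeup) {
    event->waiters.push_back(std::move(on_wakeup));
}

int main() {
    // Owned by the queue for its whole lifetime, like buffer_wait_event.
    auto buffer_wait_event = std::make_shared<Event>();

    // Client found no free buffer: park the retry on the queue's event.
    SleepUntil(buffer_wait_event, [] { /* retry DequeueBuffer and reply to the client */ });

    // Later, ReleaseBuffer()/SetPreallocatedBuffer() signal the same event and wake the client.
    buffer_wait_event->Signal();
}
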
@@ -565,7 +565,7 @@ private:
         LOG_WARNING(Service_VI, "(STUBBED) called id={}, unknown={:08X}", id, unknown);
         IPC::ResponseBuilder rb{ctx, 2, 1};
         rb.Push(RESULT_SUCCESS);
-        rb.PushCopyObjects(buffer_queue->GetNativeHandle());
+        rb.PushCopyObjects(buffer_queue->GetBufferWaitEvent());
     }
 
     std::shared_ptr<NVFlinger::NVFlinger> nv_flinger;