Mirror of https://github.com/Lime3DS/Lime3DS, synced 2024-12-27 01:22:37 -06:00

Merge pull request #4716 from wwylele/client-is-known

HLE/IPC: HLEContext can memorize the client thread and use it for SleepClientThread

Commit: 11754778bb

8 changed files with 16 additions and 22 deletions
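In short: SleepClientThread no longer takes the client thread as a parameter. HLERequestContext now receives the client Thread* when it is constructed and uses the stored pointer internally. Condensed from the hunks below, the effect on a call site looks like this (a summary sketch of the diff, not an additional change; the wakeup callback is elided):

    // Before: every caller fetched the current thread and passed it in.
    ctx.SleepClientThread(
        Kernel::SharedFrom(system.Kernel().GetThreadManager().GetCurrentThread()),
        "file::read", read_timeout_ns, /* wakeup callback */);

    // After: the context already knows which thread issued the request.
    ctx.SleepClientThread("file::read", read_timeout_ns, /* wakeup callback */);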
@@ -32,8 +32,7 @@ void SessionRequestHandler::ClientDisconnected(std::shared_ptr<ServerSession> se
                              connected_sessions.end());
 }
 
-std::shared_ptr<Event> HLERequestContext::SleepClientThread(std::shared_ptr<Thread> thread,
-                                                            const std::string& reason,
+std::shared_ptr<Event> HLERequestContext::SleepClientThread(const std::string& reason,
                                                             std::chrono::nanoseconds timeout,
                                                             WakeupCallback&& callback) {
     // Put the client thread to sleep until the wait event is signaled or the timeout expires.
@@ -60,7 +59,7 @@ std::shared_ptr<Event> HLERequestContext::SleepClientThread(std::shared_ptr<Thre
     auto event = kernel.CreateEvent(Kernel::ResetType::OneShot, "HLE Pause Event: " + reason);
     thread->status = ThreadStatus::WaitHleEvent;
     thread->wait_objects = {event};
-    event->AddWaitingThread(thread);
+    event->AddWaitingThread(SharedFrom(thread));
 
     if (timeout.count() > 0)
         thread->WakeAfterDelay(timeout.count());
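Why SharedFrom appears here: thread is now the raw Thread* stored in the context, while AddWaitingThread takes an owning std::shared_ptr<Thread>, so the raw pointer has to be promoted back to a shared pointer. A plausible shape for such a helper, assuming kernel objects use std::enable_shared_from_this (a sketch, not necessarily the repository's exact definition):

    template <typename T>
    std::shared_ptr<T> SharedFrom(T* raw) {
        // Recover an owning pointer from a raw pointer into a
        // shared_ptr-managed object; assumed null-safe for the
        // nullptr threads used by the unit tests further down.
        if (raw == nullptr)
            return nullptr;
        return std::static_pointer_cast<T>(raw->shared_from_this());
    }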
@@ -68,8 +67,9 @@ std::shared_ptr<Event> HLERequestContext::SleepClientThread(std::shared_ptr<Thre
     return event;
 }
 
-HLERequestContext::HLERequestContext(KernelSystem& kernel, std::shared_ptr<ServerSession> session)
-    : kernel(kernel), session(std::move(session)) {
+HLERequestContext::HLERequestContext(KernelSystem& kernel, std::shared_ptr<ServerSession> session,
+                                     Thread* thread)
+    : kernel(kernel), session(std::move(session)), thread(thread) {
     cmd_buf[0] = 0;
 }
 
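This constructor change is the heart of the patch: the context captures the client thread once, as a non-owning Thread*, instead of every caller re-deriving it from the thread manager. The implied lifetime contract (visible in HandleSyncRequest below, where the context is a stack local) is that the context never outlives the synchronous request, so the raw pointer cannot dangle.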
@@ -158,7 +158,7 @@ private:
  */
 class HLERequestContext {
 public:
-    HLERequestContext(KernelSystem& kernel, std::shared_ptr<ServerSession> session);
+    HLERequestContext(KernelSystem& kernel, std::shared_ptr<ServerSession> session, Thread* thread);
     ~HLERequestContext();
 
     /// Returns a pointer to the IPC command buffer for this request.
@@ -180,7 +180,6 @@ public:
     /**
     * Puts the specified guest thread to sleep until the returned event is signaled or until the
     * specified timeout expires.
-    * @param thread Thread to be put to sleep.
     * @param reason Reason for pausing the thread, to be used for debugging purposes.
     * @param timeout Timeout in nanoseconds after which the thread will be awoken and the callback
     * invoked with a Timeout reason.
@@ -189,8 +188,7 @@ public:
     * was called.
     * @returns Event that when signaled will resume the thread and call the callback function.
     */
-    std::shared_ptr<Event> SleepClientThread(std::shared_ptr<Thread> thread,
-                                             const std::string& reason,
+    std::shared_ptr<Event> SleepClientThread(const std::string& reason,
                                              std::chrono::nanoseconds timeout,
                                              WakeupCallback&& callback);
 
@@ -240,6 +238,7 @@ private:
     KernelSystem& kernel;
     std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
     std::shared_ptr<ServerSession> session;
+    Thread* thread;
     // TODO(yuriks): Check common usage of this and optimize size accordingly
     boost::container::small_vector<std::shared_ptr<Object>, 8> request_handles;
     // The static buffers will be created when the IPC request is translated.
@@ -72,7 +72,7 @@ ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread) {
     kernel.memory.ReadBlock(*current_process, thread->GetCommandBufferAddress(), cmd_buf.data(),
                             cmd_buf.size() * sizeof(u32));
 
-    Kernel::HLERequestContext context(kernel, SharedFrom(this));
+    Kernel::HLERequestContext context(kernel, SharedFrom(this), thread.get());
     context.PopulateFromIncomingCommandBuffer(cmd_buf.data(), *current_process);
 
     hle_handler->HandleSyncRequest(context);
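HandleSyncRequest already holds the client thread as a std::shared_ptr, so thread.get() hands the freshly built context a non-owning view for the duration of the request, consistent with the lifetime contract noted above.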
@@ -71,8 +71,7 @@ void File::Read(Kernel::HLERequestContext& ctx) {
     rb.PushMappedBuffer(buffer);
 
     std::chrono::nanoseconds read_timeout_ns{backend->GetReadDelayNs(length)};
-    ctx.SleepClientThread(Kernel::SharedFrom(system.Kernel().GetThreadManager().GetCurrentThread()),
-                          "file::read", read_timeout_ns,
+    ctx.SleepClientThread("file::read", read_timeout_ns,
                           [](std::shared_ptr<Kernel::Thread> /*thread*/,
                              Kernel::HLERequestContext& /*ctx*/,
                              Kernel::ThreadWakeupReason /*reason*/) {
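The remaining service call sites change in the same mechanical way: drop the leading Kernel::SharedFrom(...GetCurrentThread()) argument and keep the reason string, timeout, and wakeup lambda as they were. Note that the lambda still receives the slept thread as a std::shared_ptr<Kernel::Thread> parameter, so wakeup-callback code is untouched by this refactor.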
@@ -71,8 +71,7 @@ void FS_USER::OpenFile(Kernel::HLERequestContext& ctx) {
         LOG_ERROR(Service_FS, "failed to get a handle for file {}", file_path.DebugStr());
     }
 
-    ctx.SleepClientThread(Kernel::SharedFrom(system.Kernel().GetThreadManager().GetCurrentThread()),
-                          "fs_user::open", open_timeout_ns,
+    ctx.SleepClientThread("fs_user::open", open_timeout_ns,
                           [](std::shared_ptr<Kernel::Thread> /*thread*/,
                              Kernel::HLERequestContext& /*ctx*/,
                              Kernel::ThreadWakeupReason /*reason*/) {
@@ -130,8 +129,7 @@ void FS_USER::OpenFileDirectly(Kernel::HLERequestContext& ctx) {
                   file_path.DebugStr(), mode.hex, attributes);
     }
 
-    ctx.SleepClientThread(Kernel::SharedFrom(system.Kernel().GetThreadManager().GetCurrentThread()),
-                          "fs_user::open_directly", open_timeout_ns,
+    ctx.SleepClientThread("fs_user::open_directly", open_timeout_ns,
                           [](std::shared_ptr<Kernel::Thread> /*thread*/,
                              Kernel::HLERequestContext& /*ctx*/,
                              Kernel::ThreadWakeupReason /*reason*/) {
@@ -1179,7 +1179,6 @@ void NWM_UDS::ConnectToNetwork(Kernel::HLERequestContext& ctx, u16 command_id,
     static constexpr std::chrono::nanoseconds UDSConnectionTimeout{300000000};
 
     connection_event = ctx.SleepClientThread(
-        Kernel::SharedFrom(system.Kernel().GetThreadManager().GetCurrentThread()),
         "uds::ConnectToNetwork", UDSConnectionTimeout,
         [command_id](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
                      Kernel::ThreadWakeupReason reason) {
@@ -127,9 +127,8 @@ void SRV::GetServiceHandle(Kernel::HLERequestContext& ctx) {
     if (client_port.Failed()) {
         if (wait_until_available && client_port.Code() == ERR_SERVICE_NOT_REGISTERED) {
             LOG_INFO(Service_SRV, "called service={} delayed", name);
-            std::shared_ptr<Kernel::Event> get_service_handle_event = ctx.SleepClientThread(
-                Kernel::SharedFrom(system.Kernel().GetThreadManager().GetCurrentThread()),
-                "GetServiceHandle", std::chrono::nanoseconds(-1), get_handle);
+            std::shared_ptr<Kernel::Event> get_service_handle_event =
+                ctx.SleepClientThread("GetServiceHandle", std::chrono::nanoseconds(-1), get_handle);
             get_service_handle_delayed_map[name] = std::move(get_service_handle_event);
             return;
         } else {
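The std::chrono::nanoseconds(-1) timeout is deliberate: as the SleepClientThread body above shows, WakeAfterDelay is only scheduled when timeout.count() > 0, so a negative timeout means the thread sleeps with no deadline until the event is signaled.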
@@ -25,7 +25,7 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
     Memory::MemorySystem memory;
     Kernel::KernelSystem kernel(memory, timing, [] {}, 0);
     auto session = std::get<std::shared_ptr<ServerSession>>(kernel.CreateSessionPair());
-    HLERequestContext context(kernel, std::move(session));
+    HLERequestContext context(kernel, std::move(session), nullptr);
 
     auto process = kernel.CreateProcess(kernel.CreateCodeSet("", 0));
@@ -237,7 +237,7 @@ TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
     Memory::MemorySystem memory;
     Kernel::KernelSystem kernel(memory, timing, [] {}, 0);
     auto session = std::get<std::shared_ptr<ServerSession>>(kernel.CreateSessionPair());
-    HLERequestContext context(kernel, std::move(session));
+    HLERequestContext context(kernel, std::move(session), nullptr);
 
     auto process = kernel.CreateProcess(kernel.CreateCodeSet("", 0));
     auto* input = context.CommandBuffer();
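The tests now pass nullptr for the client thread. That appears safe because these cases only exercise command-buffer translation and never reach SleepClientThread; any test that did would need a real thread behind the context.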