Mirror of https://github.com/Atmosphere-NX/Atmosphere.git, synced 2025-06-01 07:18:22 -04:00
htcs: hook up HtcsService to rpc client

parent 0c791f2279
commit 7667104961

18 changed files with 733 additions and 77 deletions
@@ -42,6 +42,7 @@ namespace ams::htc::server::rpc {
         m_task_id_free_list(g_task_id_free_list),
         m_task_table(g_task_table),
         m_task_active(),
+        m_is_htcs_task(),
         m_task_queue(),
         m_cancelled(false),
         m_thread_running(false)
@@ -63,6 +64,7 @@ namespace ams::htc::server::rpc {
         m_task_id_free_list(g_task_id_free_list),
         m_task_table(g_task_table),
         m_task_active(),
+        m_is_htcs_task(),
         m_task_queue(),
         m_cancelled(false),
         m_thread_running(false)
@@ -173,8 +175,7 @@ namespace ams::htc::server::rpc {
         /* Cancel all tasks. */
         for (size_t i = 0; i < MaxRpcCount; ++i) {
             if (m_task_active[i]) {
-                /* TODO: enum member */
-                m_task_table.Get<Task>(i)->Cancel(static_cast<RpcTaskCancelReason>(2));
+                m_task_table.Get<Task>(i)->Cancel(RpcTaskCancelReason::ClientFinalized);
             }
         }
     }
@@ -235,6 +236,7 @@ namespace ams::htc::server::rpc {
             /* If the task is canceled, free it. */
             if (task->GetTaskState() == RpcTaskState::Cancelled) {
                 m_task_active[header->task_id] = false;
+                m_is_htcs_task[header->task_id] = false;
                 m_task_table.Delete(header->task_id);
                 m_task_id_free_list.Free(header->task_id);
                 continue;
@@ -340,4 +342,128 @@ namespace ams::htc::server::rpc {
         return ResultSuccess();
     }
 
+    void RpcClient::CancelBySocket(s32 handle) {
+        /* Check if we need to cancel each task. */
+        for (size_t i = 0; i < MaxRpcCount; ++i) {
+            /* Lock ourselves. */
+            std::scoped_lock lk(m_mutex);
+
+            /* Check that the task is active and is an htcs task. */
+            if (!m_task_active[i] || !m_is_htcs_task[i]) {
+                continue;
+            }
+
+            /* Get the htcs task. */
+            auto *htcs_task = m_task_table.Get<htcs::impl::rpc::HtcsTask>(i);
+
+            /* Handle the case where the task handle is the one we're cancelling. */
+            if (this->GetTaskHandle(i) == handle) {
+                /* If the task is complete, free it. */
+                if (htcs_task->GetTaskState() == RpcTaskState::Completed) {
+                    m_task_active[i] = false;
+                    m_is_htcs_task[i] = false;
+                    m_task_table.Delete(i);
+                    m_task_id_free_list.Free(i);
+                } else {
+                    /* If the task is a send task, notify. */
+                    if (htcs_task->GetTaskType() == htcs::impl::rpc::HtcsTaskType::Send) {
+                        m_task_queue.Add(i, PacketCategory::Notification);
+                    }
+
+                    /* Cancel the task. */
+                    htcs_task->Cancel(RpcTaskCancelReason::BySocket);
+                }
+
+                /* The task has been cancelled, so we can move on. */
+                continue;
+            }
+
+            /* Handle the case where the task is a select task. */
+            if (htcs_task->GetTaskType() == htcs::impl::rpc::HtcsTaskType::Select) {
+                /* Get the select task. */
+                auto *select_task = m_task_table.Get<htcs::impl::rpc::SelectTask>(i);
+
+                /* Get the handle counts. */
+                const auto num_read      = select_task->GetReadHandleCount();
+                const auto num_write     = select_task->GetWriteHandleCount();
+                const auto num_exception = select_task->GetExceptionHandleCount();
+                const auto total         = num_read + num_write + num_exception;
+
+                /* Get the handle array. */
+                const auto *handles = select_task->GetHandles();
+
+                /* Check each handle. */
+                for (auto handle_idx = 0; handle_idx < total; ++handle_idx) {
+                    if (handles[handle_idx] == handle) {
+                        /* If the select is complete, free it. */
+                        if (select_task->GetTaskState() == RpcTaskState::Completed) {
+                            m_task_active[i] = false;
+                            m_is_htcs_task[i] = false;
+                            m_task_table.Delete(i);
+                            m_task_id_free_list.Free(i);
+                        } else {
+                            /* Cancel the task. */
+                            select_task->Cancel(RpcTaskCancelReason::BySocket);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    s32 RpcClient::GetTaskHandle(u32 task_id) {
+        /* Check pre-conditions. */
+        AMS_ASSERT(m_task_active[task_id]);
+        AMS_ASSERT(m_is_htcs_task[task_id]);
+
+        /* Get the htcs task. */
+        auto *task = m_task_table.Get<htcs::impl::rpc::HtcsTask>(task_id);
+
+        /* Check that the task has a handle. */
+        if (!m_task_active[task_id] || !m_is_htcs_task[task_id] || task == nullptr) {
+            return -1;
+        }
+
+        /* Get the task's type. */
+        const auto type = task->GetTaskType();
+
+        /* Check that the task is new enough. */
+        if (task->GetVersion() == 3) {
+            if (type == htcs::impl::rpc::HtcsTaskType::Receive || type == htcs::impl::rpc::HtcsTaskType::Send) {
+                return -1;
+            }
+        }
+
+        /* Get the handle from the task. */
+        switch (type) {
+            case htcs::impl::rpc::HtcsTaskType::Receive:
+                return static_cast<htcs::impl::rpc::ReceiveTask *>(task)->GetHandle();
+            case htcs::impl::rpc::HtcsTaskType::Send:
+                return static_cast<htcs::impl::rpc::SendTask *>(task)->GetHandle();
+            case htcs::impl::rpc::HtcsTaskType::Shutdown:
+                return static_cast<htcs::impl::rpc::ShutdownTask *>(task)->GetHandle();
+            case htcs::impl::rpc::HtcsTaskType::Close:
+                return -1;
+            case htcs::impl::rpc::HtcsTaskType::Connect:
+                return static_cast<htcs::impl::rpc::ConnectTask *>(task)->GetHandle();
+            case htcs::impl::rpc::HtcsTaskType::Listen:
+                return static_cast<htcs::impl::rpc::ListenTask *>(task)->GetHandle();
+            case htcs::impl::rpc::HtcsTaskType::Accept:
+                return static_cast<htcs::impl::rpc::AcceptTask *>(task)->GetServerHandle();
+            case htcs::impl::rpc::HtcsTaskType::Socket:
+                return -1;
+            case htcs::impl::rpc::HtcsTaskType::Bind:
+                return static_cast<htcs::impl::rpc::BindTask *>(task)->GetHandle();
+            case htcs::impl::rpc::HtcsTaskType::Fcntl:
+                return static_cast<htcs::impl::rpc::FcntlTask *>(task)->GetHandle();
+            case htcs::impl::rpc::HtcsTaskType::ReceiveSmall:
+                return static_cast<htcs::impl::rpc::ReceiveSmallTask *>(task)->GetHandle();
+            case htcs::impl::rpc::HtcsTaskType::SendSmall:
+                return static_cast<htcs::impl::rpc::SendSmallTask *>(task)->GetHandle();
+            case htcs::impl::rpc::HtcsTaskType::Select:
+                return -1;
+            AMS_UNREACHABLE_DEFAULT_CASE();
+        }
+    }
+
 }
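For context, a minimal usage sketch of the new entry point (an illustration only, not code from this commit; the wrapper function name and call site are hypothetical): the htcs service layer can invoke CancelBySocket when a socket handle is closed, so any in-flight rpc tasks bound to that handle are cancelled or freed.

    /* Hypothetical call site: run when the htcs service layer closes a socket. */
    void OnHtcsSocketClosed(ams::htc::server::rpc::RpcClient &rpc_client, s32 socket_handle) {
        /* Cancel (or free, if already completed) every htcs task bound to this handle. */
        rpc_client.CancelBySocket(socket_handle);
    }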
@@ -19,6 +19,7 @@
 #include "htc_rpc_task_table.hpp"
 #include "htc_rpc_task_queue.hpp"
 #include "htc_rpc_task_id_free_list.hpp"
+#include "../../../htcs/impl/rpc/htcs_rpc_tasks.hpp"
 
 namespace ams::htc::server::rpc {
 
@@ -61,6 +62,7 @@ namespace ams::htc::server::rpc {
            RpcTaskIdFreeList &m_task_id_free_list;
            RpcTaskTable &m_task_table;
            bool m_task_active[MaxRpcCount];
+           bool m_is_htcs_task[MaxRpcCount];
            RpcTaskQueue m_task_queue;
            bool m_cancelled;
            bool m_thread_running;
@@ -104,10 +106,12 @@ namespace ams::htc::server::rpc {
                /* Create the new task. */
                T *task = m_task_table.New<T>(task_id);
                m_task_active[task_id] = true;
+               m_is_htcs_task[task_id] = htcs::impl::rpc::IsHtcsTask<T>;
 
                /* Ensure we clean up the task, if we fail after this. */
                auto task_guard = SCOPE_GUARD {
                    m_task_active[task_id] = false;
+                   m_is_htcs_task[task_id] = false;
                    m_task_table.Delete<T>(task_id);
                    m_task_id_free_list.Free(task_id);
                };
@@ -134,6 +138,24 @@ namespace ams::htc::server::rpc {
                return ResultSuccess();
            }
 
+           template<typename T, size_t... Ix> requires IsRpcTask<T>
+           ALWAYS_INLINE Result GetResultImpl(std::index_sequence<Ix...>, u32 task_id, RpcTaskResultType<T, Ix>... args) {
+               /* Lock ourselves. */
+               std::scoped_lock lk(m_mutex);
+
+               /* Get the task. */
+               T *task = m_task_table.Get<T>(task_id);
+               R_UNLESS(task != nullptr, htc::ResultInvalidTaskId());
+
+               /* Check that the task is completed. */
+               R_UNLESS(task->GetTaskState() == RpcTaskState::Completed, htc::ResultTaskNotCompleted());
+
+               /* Get the task's result. */
+               R_TRY(task->GetResult(args...));
+
+               return ResultSuccess();
+           }
+
            template<typename T, size_t... Ix> requires IsRpcTask<T>
            ALWAYS_INLINE Result EndImpl(std::index_sequence<Ix...>, u32 task_id, RpcTaskResultType<T, Ix>... args) {
                /* Lock ourselves. */
@@ -146,6 +168,7 @@ namespace ams::htc::server::rpc {
                /* Ensure the task is freed if it needs to be, when we're done. */
                auto task_guard = SCOPE_GUARD {
                    m_task_active[task_id] = false;
+                   m_is_htcs_task[task_id] = false;
                    m_task_table.Delete<T>(task_id);
                    m_task_id_free_list.Free(task_id);
                };
@@ -153,10 +176,10 @@ namespace ams::htc::server::rpc {
                /* If the task was cancelled, handle that. */
                if (task->GetTaskState() == RpcTaskState::Cancelled) {
                    switch (task->GetTaskCancelReason()) {
-                       case RpcTaskCancelReason::One:
+                       case RpcTaskCancelReason::BySocket:
                            task_guard.Cancel();
                            return htc::ResultUnknown2021();
-                       case RpcTaskCancelReason::Two:
-                           return htc::ResultTaskCancelled();
+                       case RpcTaskCancelReason::ClientFinalized:
+                           return htc::ResultCancelled();
                        case RpcTaskCancelReason::QueueNotAvailable:
                            return htc::ResultTaskQueueNotAvailable();
@@ -169,20 +192,178 @@ namespace ams::htc::server::rpc {

                return ResultSuccess();
            }

            s32 GetTaskHandle(u32 task_id);
        public:
            void Wait(u32 task_id) {
                os::WaitEvent(m_task_table.Get<Task>(task_id)->GetEvent());
            }

            Handle DetachReadableHandle(u32 task_id) {
                return os::DetachReadableHandleOfSystemEvent(m_task_table.Get<Task>(task_id)->GetSystemEvent());
            }

            void CancelBySocket(s32 handle);

            template<typename T, typename... Args> requires (IsRpcTask<T> && sizeof...(Args) == std::tuple_size<RpcTaskArgumentsType<T>>::value)
            Result Begin(u32 *out_task_id, Args &&... args) {
                return this->BeginImpl<T>(std::make_index_sequence<std::tuple_size<RpcTaskArgumentsType<T>>::value>(), out_task_id, std::forward<Args>(args)...);
            }

            template<typename T, typename... Args> requires (IsRpcTask<T> && sizeof...(Args) == std::tuple_size<RpcTaskResultsType<T>>::value)
            Result GetResult(u32 task_id, Args &&... args) {
                return this->GetResultImpl<T>(std::make_index_sequence<std::tuple_size<RpcTaskResultsType<T>>::value>(), task_id, std::forward<Args>(args)...);
            }

            template<typename T, typename... Args> requires (IsRpcTask<T> && sizeof...(Args) == std::tuple_size<RpcTaskResultsType<T>>::value)
            Result End(u32 task_id, Args &&... args) {
                return this->EndImpl<T>(std::make_index_sequence<std::tuple_size<RpcTaskResultsType<T>>::value>(), task_id, std::forward<Args>(args)...);
            }

            template<typename T> requires IsRpcTask<T>
            Result VerifyTaskIdWitHandle(u32 task_id, s32 handle) {
                /* Lock ourselves. */
                std::scoped_lock lk(m_mutex);

                /* Get the task. */
                T *task = m_task_table.Get<T>(task_id);
                R_UNLESS(task != nullptr, htc::ResultInvalidTaskId());

                /* Check the task handle. */
                R_UNLESS(task->GetHandle() == handle, htc::ResultInvalidTaskId());

                return ResultSuccess();
            }

            template<typename T> requires IsRpcTask<T>
            Result Notify(u32 task_id) {
                /* Lock ourselves. */
                std::scoped_lock lk(m_mutex);

                /* Check that our queue is available. */
                R_UNLESS(m_thread_running, htc::ResultTaskQueueNotAvailable());

                /* Get the task. */
                T *task = m_task_table.Get<T>(task_id);
                R_UNLESS(task != nullptr, htc::ResultInvalidTaskId());

                /* Add notification to our queue. */
                m_task_queue.Add(task_id, PacketCategory::Notification);

                return ResultSuccess();
            }

            template<typename T> requires IsRpcTask<T>
            void WaitNotification(u32 task_id) {
                /* Get the task from the table, releasing our lock afterwards. */
                T *task;
                {
                    /* Lock ourselves. */
                    std::scoped_lock lk(m_mutex);

                    /* Get the task. */
                    task = m_task_table.Get<T>(task_id);
                }

                /* Wait for a notification. */
                task->WaitNotification();
            }

            template<typename T> requires IsRpcTask<T>
            bool IsCancelled(u32 task_id) {
                /* Lock ourselves. */
                std::scoped_lock lk(m_mutex);

                /* Get the task. */
                T *task = m_task_table.Get<T>(task_id);

                /* Check the task state. */
                return task != nullptr && task->GetTaskState() == RpcTaskState::Cancelled;
            }

            template<typename T> requires IsRpcTask<T>
            bool IsCompleted(u32 task_id) {
                /* Lock ourselves. */
                std::scoped_lock lk(m_mutex);

                /* Get the task. */
                T *task = m_task_table.Get<T>(task_id);

                /* Check the task state. */
                return task != nullptr && task->GetTaskState() == RpcTaskState::Completed;
            }

            template<typename T> requires IsRpcTask<T>
            Result SendContinue(u32 task_id, const void *buffer, s64 buffer_size) {
                /* Lock ourselves. */
                std::scoped_lock lk(m_mutex);

                /* Get the task. */
                T *task = m_task_table.Get<T>(task_id);
                R_UNLESS(task != nullptr, htc::ResultInvalidTaskId());

                /* If the task was cancelled, handle that. */
                if (task->GetTaskState() == RpcTaskState::Cancelled) {
                    switch (task->GetTaskCancelReason()) {
                        case RpcTaskCancelReason::QueueNotAvailable:
                            return htc::ResultTaskQueueNotAvailable();
                        default:
                            return htc::ResultTaskCancelled();
                    }
                }

                /* Set the task's buffer. */
                if (buffer_size > 0) {
                    task->SetBuffer(buffer, buffer_size);
                    os::SignalEvent(std::addressof(m_send_buffer_available_events[task_id]));
                }

                return ResultSuccess();
            }

            template<typename T> requires IsRpcTask<T>
            Result ReceiveContinue(u32 task_id, void *buffer, s64 buffer_size) {
                /* Get the task's buffer, and prepare to receive. */
                const void *result_buffer;
                s64 result_size;
                {
                    /* Lock ourselves. */
                    std::scoped_lock lk(m_mutex);

                    /* Get the task. */
                    T *task = m_task_table.Get<T>(task_id);
                    R_UNLESS(task != nullptr, htc::ResultInvalidTaskId());

                    /* If the task was cancelled, handle that. */
                    if (task->GetTaskState() == RpcTaskState::Cancelled) {
                        switch (task->GetTaskCancelReason()) {
                            case RpcTaskCancelReason::QueueNotAvailable:
                                return htc::ResultTaskQueueNotAvailable();
                            default:
                                return htc::ResultTaskCancelled();
                        }
                    }

                    /* Get the result size. */
                    result_size = task->GetResultSize();
                    R_SUCCEED_IF(result_size == 0);

                    /* Get the result buffer. */
                    result_buffer = task->GetBuffer();
                }

                /* Wait for the receive buffer to become available. */
                os::WaitEvent(std::addressof(m_receive_buffer_available_events[task_id]));

                /* Check that we weren't cancelled. */
                R_UNLESS(!m_cancelled, htc::ResultCancelled());

                /* Copy the received data. */
                AMS_ASSERT(0 <= result_size && result_size <= buffer_size);
                std::memcpy(buffer, result_buffer, result_size);

                return ResultSuccess();
            }
    };

}
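To illustrate the lifecycle the public API above exposes, here is a minimal sketch (an assumption for illustration, not code from this commit; the helper name and generic task parameter are hypothetical) of driving a task from Begin to completion:

    /* Hypothetical helper: start a task, then block until its event is signaled. */
    template<typename T, typename... BeginArgs>
    Result RunTaskToCompletion(ams::htc::server::rpc::RpcClient &rpc_client, u32 *out_task_id, BeginArgs &&... args) {
        /* Allocate a task slot and issue the request; arguments must match RpcTaskArgumentsType<T>. */
        R_TRY(rpc_client.Begin<T>(out_task_id, std::forward<BeginArgs>(args)...));

        /* Wait for the task's completion event. */
        rpc_client.Wait(*out_task_id);

        /* The caller then collects results via GetResult<T>/End<T>; End also frees the task slot. */
        return ResultSuccess();
    }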
@@ -31,9 +31,10 @@ namespace ams::htc::server::rpc {
 
     class RpcTaskTable {
         private:
-            /* TODO: How is this variable derived...? */
-            /* Nintendo has a value of 0xE1D8, which is deeply magic. */
-            static constexpr size_t MaxTaskSize = 0xA000;
+            /* htcs::ReceiveSmallTask/htcs::ReceiveSendTask are the largest tasks, containing an inline 0xE000 buffer. */
+            /* We allow for ~0x100 task overhead from the additional events those contain. */
+            /* NOTE: Nintendo hardcodes a maximum size of 0xE1D8, despite SendSmallTask being 0xE098 as of latest check. */
+            static constexpr size_t MaxTaskSize = 0xE100;
             using TaskStorage = typename std::aligned_storage<MaxTaskSize, alignof(void *)>::type;
         private:
             bool m_valid[MaxRpcCount];
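The new comments pin the bound to the largest tasks plus some slack; a compile-time guard along these lines (a sketch, not part of the commit, assuming the htcs task definitions are visible in this header) would catch a task outgrowing the storage:

    /* Sketch: assert that the known-largest tasks still fit in one TaskStorage slot. */
    static_assert(sizeof(htcs::impl::rpc::ReceiveSmallTask) <= MaxTaskSize);
    static_assert(sizeof(htcs::impl::rpc::SendSmallTask)    <= MaxTaskSize);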
@@ -40,8 +40,8 @@ namespace ams::htc::server::rpc {
 
     enum class RpcTaskCancelReason {
         None = 0,
-        One = 1,
-        Two = 2,
+        BySocket = 1,
+        ClientFinalized = 2,
         QueueNotAvailable = 3,
     };
 
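With the placeholder members renamed, call sites can spell out the reason instead of casting a raw value; both forms appear in the diff above:

    /* Before: */ m_task_table.Get<Task>(i)->Cancel(static_cast<RpcTaskCancelReason>(2));
    /* After:  */ m_task_table.Get<Task>(i)->Cancel(RpcTaskCancelReason::ClientFinalized);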