author		Daniel Vetter <daniel.vetter@ffwll.ch>	2023-01-31 11:22:15 +0100
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2023-01-31 12:23:23 +0100
commit	aebd8f0c6f8280ba35bc989f4a9ea47469d3589a (patch)
tree	a7c0f61c1827bb2904f453cc055dda138366c438 /io_uring/msg_ring.c
parent	155c6b16eec2eaaaf6c71abf2d5e71641770d7ba (diff)
parent	6d796c50f84ca79f1722bb131799e5a5710c4700 (diff)
Merge v6.2-rc6 into drm-next
Due to holidays we started -next with more -fixes in-flight than usual,
and people have been asking where they are. Backmerge to get things
better in sync.

Conflicts:
- Tiny conflict in drm_fbdev_generic.c between the variable rename and
  the missing error handling that got added.
- Conflict in drm_fb_helper.c between the added call to vgaswitcheroo
  in drm_fb_helper_single_fb_probe() and a refactor patch that
  extracted lots of helpers and incidentally removed the dev local
  variable. Re-add it to make things compile.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'io_uring/msg_ring.c')
-rw-r--r--	io_uring/msg_ring.c	| 130
1 file changed, 82 insertions(+), 48 deletions(-)
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 2d3cd945a531..15602a136821 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -25,6 +25,28 @@ struct io_msg {
u32 flags;
};
+static void io_double_unlock_ctx(struct io_ring_ctx *octx)
+{
+ mutex_unlock(&octx->uring_lock);
+}
+
+static int io_double_lock_ctx(struct io_ring_ctx *octx,
+ unsigned int issue_flags)
+{
+ /*
+ * To ensure proper ordering between the two ctxs, we can only
+ * attempt a trylock on the target. If that fails and we already have
+ * the source ctx lock, punt to io-wq.
+ */
+ if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+ if (!mutex_trylock(&octx->uring_lock))
+ return -EAGAIN;
+ return 0;
+ }
+ mutex_lock(&octx->uring_lock);
+ return 0;
+}
+
void io_msg_ring_cleanup(struct io_kiocb *req)
{
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
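
The trylock rule in io_double_lock_ctx() above is the standard way to take
two locks of the same class when no global ordering exists: a task that
already holds one ring's uring_lock may only trylock the other ring's, and
backs off on failure instead of risking an ABBA deadlock. A minimal sketch
of the same pattern with POSIX mutexes (hypothetical names, not kernel
code):

	#include <errno.h>
	#include <pthread.h>

	static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

	/* Returns 0 with both locks held, or -EAGAIN if the caller must
	 * retry from a context that holds neither lock (the io-wq punt
	 * plays this role in the patch above). */
	static int lock_both(void)
	{
		pthread_mutex_lock(&lock_a);
		if (pthread_mutex_trylock(&lock_b)) {
			/* A lock_b -> lock_a holder may be waiting on us;
			 * drop lock_a rather than block. */
			pthread_mutex_unlock(&lock_a);
			return -EAGAIN;
		}
		return 0;
	}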
@@ -36,6 +58,29 @@ void io_msg_ring_cleanup(struct io_kiocb *req)
msg->src_file = NULL;
}
+static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
+{
+ if (!target_ctx->task_complete)
+ return false;
+ return current != target_ctx->submitter_task;
+}
+
+static int io_msg_exec_remote(struct io_kiocb *req, task_work_func_t func)
+{
+ struct io_ring_ctx *ctx = req->file->private_data;
+ struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+ struct task_struct *task = READ_ONCE(ctx->submitter_task);
+
+ if (unlikely(!task))
+ return -EOWNERDEAD;
+
+ init_task_work(&msg->tw, func);
+ if (task_work_add(ctx->submitter_task, &msg->tw, TWA_SIGNAL))
+ return -EOWNERDEAD;
+
+ return IOU_ISSUE_SKIP_COMPLETE;
+}
+
static void io_msg_tw_complete(struct callback_head *head)
{
struct io_msg *msg = container_of(head, struct io_msg, tw);
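
io_msg_exec_remote() above is the task_work deferral pattern: when the
target ring requires completions to run in its submitter task
(task_complete is set for single-issuer/deferred-taskrun rings), the
request queues a callback on that task and wakes it with TWA_SIGNAL,
returning -EOWNERDEAD if the submitter is already gone. A rough userspace
analogue of the same idea, with hypothetical names (not kernel code):

	#include <errno.h>
	#include <pthread.h>

	struct cb {
		struct cb *next;
		void (*func)(struct cb *);
	};

	/* Per-owner callback list, drained only by the owner thread
	 * itself (the submitter-task analogue). */
	struct owner {
		pthread_mutex_t lock;
		pthread_cond_t wake;
		struct cb *head;
		int alive;
	};

	static int defer_to_owner(struct owner *o, struct cb *w,
				  void (*func)(struct cb *))
	{
		int ret = -EOWNERDEAD;	/* mirrors the !task check */

		w->func = func;
		pthread_mutex_lock(&o->lock);
		if (o->alive) {
			w->next = o->head;
			o->head = w;
			/* TWA_SIGNAL analogue: wake the owner to drain. */
			pthread_cond_signal(&o->wake);
			ret = 0;
		}
		pthread_mutex_unlock(&o->lock);
		return ret;
	}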
@@ -43,61 +88,54 @@ static void io_msg_tw_complete(struct callback_head *head)
struct io_ring_ctx *target_ctx = req->file->private_data;
int ret = 0;
- if (current->flags & PF_EXITING)
+ if (current->flags & PF_EXITING) {
ret = -EOWNERDEAD;
- else if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
- ret = -EOVERFLOW;
+ } else {
+ /*
+ * If the target ring is using IOPOLL mode, then we need to be
+ * holding the uring_lock for posting completions. Other ring
+ * types rely on the regular completion locking, which is
+ * handled while posting.
+ */
+ if (target_ctx->flags & IORING_SETUP_IOPOLL)
+ mutex_lock(&target_ctx->uring_lock);
+ if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+ ret = -EOVERFLOW;
+ if (target_ctx->flags & IORING_SETUP_IOPOLL)
+ mutex_unlock(&target_ctx->uring_lock);
+ }
if (ret < 0)
req_set_fail(req);
io_req_queue_tw_complete(req, ret);
}
-static int io_msg_ring_data(struct io_kiocb *req)
+static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *target_ctx = req->file->private_data;
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+ int ret;
if (msg->src_fd || msg->dst_fd || msg->flags)
return -EINVAL;
+ if (target_ctx->flags & IORING_SETUP_R_DISABLED)
+ return -EBADFD;
- if (target_ctx->task_complete && current != target_ctx->submitter_task) {
- init_task_work(&msg->tw, io_msg_tw_complete);
- if (task_work_add(target_ctx->submitter_task, &msg->tw,
- TWA_SIGNAL_NO_IPI))
- return -EOWNERDEAD;
-
- atomic_or(IORING_SQ_TASKRUN, &target_ctx->rings->sq_flags);
- return IOU_ISSUE_SKIP_COMPLETE;
- }
-
- if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
- return 0;
+ if (io_msg_need_remote(target_ctx))
+ return io_msg_exec_remote(req, io_msg_tw_complete);
- return -EOVERFLOW;
-}
-
-static void io_double_unlock_ctx(struct io_ring_ctx *octx,
- unsigned int issue_flags)
-{
- mutex_unlock(&octx->uring_lock);
-}
-
-static int io_double_lock_ctx(struct io_ring_ctx *octx,
- unsigned int issue_flags)
-{
- /*
- * To ensure proper ordering between the two ctxs, we can only
- * attempt a trylock on the target. If that fails and we already have
- * the source ctx lock, punt to io-wq.
- */
- if (!(issue_flags & IO_URING_F_UNLOCKED)) {
- if (!mutex_trylock(&octx->uring_lock))
+ ret = -EOVERFLOW;
+ if (target_ctx->flags & IORING_SETUP_IOPOLL) {
+ if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
return -EAGAIN;
- return 0;
+ if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+ ret = 0;
+ io_double_unlock_ctx(target_ctx);
+ } else {
+ if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+ ret = 0;
}
- mutex_lock(&octx->uring_lock);
- return 0;
+ return ret;
}
static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
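
From userspace, the IORING_MSG_DATA path reworked above is driven through
liburing's io_uring_prep_msg_ring() helper. A minimal sketch (ring setup
omitted; the fd and the data values are illustrative):

	#include <liburing.h>

	/* Post a CQE into another ring: target_ring_fd is the receiving
	 * io_uring instance's ring fd. The target sees a completion with
	 * cqe->user_data == 0xcafe and cqe->res == 0x10. */
	static int send_msg(struct io_uring *src, int target_ring_fd)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(src);

		if (!sqe)
			return -EBUSY;
		io_uring_prep_msg_ring(sqe, target_ring_fd, /*len=*/0x10,
				       /*data=*/0xcafe, /*flags=*/0);
		return io_uring_submit(src);
	}

With this patch, a target ring that is still IORING_SETUP_R_DISABLED fails
the request with -EBADFD, and an IOPOLL target takes (or punts for) its
uring_lock before posting the CQE.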
@@ -148,7 +186,7 @@ static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flag
if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
ret = -EOVERFLOW;
out_unlock:
- io_double_unlock_ctx(target_ctx, issue_flags);
+ io_double_unlock_ctx(target_ctx);
return ret;
}
@@ -174,6 +212,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
if (target_ctx == ctx)
return -EINVAL;
+ if (target_ctx->flags & IORING_SETUP_R_DISABLED)
+ return -EBADFD;
if (!src_file) {
src_file = io_msg_grab_file(req, issue_flags);
if (!src_file)
@@ -182,14 +222,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
req->flags |= REQ_F_NEED_CLEANUP;
}
- if (target_ctx->task_complete && current != target_ctx->submitter_task) {
- init_task_work(&msg->tw, io_msg_tw_fd_complete);
- if (task_work_add(target_ctx->submitter_task, &msg->tw,
- TWA_SIGNAL))
- return -EOWNERDEAD;
-
- return IOU_ISSUE_SKIP_COMPLETE;
- }
+ if (io_msg_need_remote(target_ctx))
+ return io_msg_exec_remote(req, io_msg_tw_fd_complete);
return io_msg_install_complete(req, issue_flags);
}
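
The IORING_MSG_SEND_FD path gets the same io_msg_need_remote() treatment
and the same -EBADFD check for a still-disabled target ring. For
completeness, a hedged userspace sketch, assuming liburing's
io_uring_prep_msg_ring_fd() helper is available (present in newer liburing
releases; check your version):

	#include <liburing.h>

	/* Pass fixed-file slot 0 of the source ring into slot 0 of the
	 * target ring's registered file table. The file must already be
	 * registered on 'src' via io_uring_register_files(). */
	static int send_fd(struct io_uring *src, int target_ring_fd)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(src);

		if (!sqe)
			return -EBUSY;
		io_uring_prep_msg_ring_fd(sqe, target_ring_fd,
					  /*source_fd=*/0, /*target_fd=*/0,
					  /*data=*/0xf00d, /*flags=*/0);
		return io_uring_submit(src);
	}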
@@ -224,7 +258,7 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
switch (msg->cmd) {
case IORING_MSG_DATA:
- ret = io_msg_ring_data(req);
+ ret = io_msg_ring_data(req, issue_flags);
break;
case IORING_MSG_SEND_FD:
ret = io_msg_send_fd(req, issue_flags);