author		Jens Axboe <axboe@kernel.dk>	2022-09-03 09:52:01 -0600
committer	Jens Axboe <axboe@kernel.dk>	2022-09-21 10:30:43 -0600
commit		dac6a0eae793f53c62a0f83d9f5423293a7845c4 (patch)
tree		35f95f765ca36d7c2f4abcfc82d3b6659410139a
parent		8ac5d85a89b48269e5aefb92b640d38367670a1b (diff)
download	linux-dac6a0eae793f53c62a0f83d9f5423293a7845c4.tar.gz
io_uring: ensure iopoll runs local task work as well
Combine the two checks we have for task_work running and whether or not
we need to shuffle the mutex into one, so we unify how task_work is run
in the iopoll loop. This helps ensure that local task_work is run when
needed, and also optimizes that path to avoid a mutex shuffle if it's
not needed. (A condensed userspace sketch of the resulting flow follows
the diffstat below.)

Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	io_uring/io_uring.c	39
-rw-r--r--	io_uring/io_uring.h	6
2 files changed, 26 insertions, 19 deletions
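The gist of the change, as a minimal standalone sketch: work queued on
the ring's local list can be flushed while still holding the lock, so
the unlock/relock dance is paid only when generic task_work actually
needs it. This is a hypothetical pthread analogue with made-up names
(poll_iteration, run_local_work, the two booleans), not the kernel code
itself; build with cc -pthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;
static bool local_pending = true;   /* models !llist_empty(&ctx->work_llist) */
static bool generic_pending = true; /* models task_work_pending(current) */

static void run_local_work(void)   { local_pending = false; }
static void run_generic_work(void) { generic_pending = false; }

/* one iteration of the poll loop, entered with uring_lock held */
static void poll_iteration(void)
{
	/* local work never takes uring_lock itself: run it in place */
	if (local_pending)
		run_local_work();

	/* generic task_work may need uring_lock: shuffle only if pending */
	if (generic_pending) {
		pthread_mutex_unlock(&uring_lock);
		run_generic_work();
		pthread_mutex_lock(&uring_lock);
	}
}

int main(void)
{
	pthread_mutex_lock(&uring_lock);
	poll_iteration();
	pthread_mutex_unlock(&uring_lock);
	printf("local=%d generic=%d\n", local_pending, generic_pending);
	return 0;
}

In the old code the mutex shuffle was unconditional whenever plain
task_work was pending; after this patch it happens only on the inner
branch, as the diff below shows.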
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index a1692dad52db..0482087b7c64 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1428,25 +1428,26 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 		 * forever, while the workqueue is stuck trying to acquire the
 		 * very same mutex.
 		 */
-		if (wq_list_empty(&ctx->iopoll_list)) {
-			u32 tail = ctx->cached_cq_tail;
-
-			mutex_unlock(&ctx->uring_lock);
-			ret = io_run_task_work_ctx(ctx);
-			mutex_lock(&ctx->uring_lock);
-			if (ret < 0)
-				break;
-
-			/* some requests don't go through iopoll_list */
-			if (tail != ctx->cached_cq_tail ||
-			    wq_list_empty(&ctx->iopoll_list))
-				break;
-		}
-
-		if (task_work_pending(current)) {
-			mutex_unlock(&ctx->uring_lock);
-			io_run_task_work();
-			mutex_lock(&ctx->uring_lock);
+		if (wq_list_empty(&ctx->iopoll_list) ||
+		    io_task_work_pending(ctx)) {
+			if (!llist_empty(&ctx->work_llist))
+				__io_run_local_work(ctx, true);
+			if (task_work_pending(current) ||
+			    wq_list_empty(&ctx->iopoll_list)) {
+				u32 tail = ctx->cached_cq_tail;
+
+				mutex_unlock(&ctx->uring_lock);
+				ret = io_run_task_work();
+				mutex_lock(&ctx->uring_lock);
+
+				if (ret < 0)
+					break;
+
+				/* some requests don't go through iopoll_list */
+				if (tail != ctx->cached_cq_tail ||
+				    wq_list_empty(&ctx->iopoll_list))
+					break;
+			}
 		}
 		ret = io_do_iopoll(ctx, !min);
 		if (ret < 0)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 0f90d1dfa42b..9d89425292b7 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -236,6 +236,12 @@ static inline int io_run_task_work(void)
 	return 0;
 }
 
+static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
+{
+	return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
+		!wq_list_empty(&ctx->work_llist);
+}
+
 static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
 {
 	int ret = 0;
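The new helper folds both sources of deferred work, the
TIF_NOTIFY_SIGNAL thread flag and the ctx->work_llist lock-free list,
into one cheap predicate, which is what lets io_iopoll_check() decide
up front whether any shuffle is worth considering. A self-contained
userspace model of that idea (hypothetical names, C11 atomics standing
in for the kernel primitives; compile with cc -std=c11):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct work_node { struct work_node *next; };

struct ctx {
	atomic_bool notify_flag;                /* models TIF_NOTIFY_SIGNAL */
	_Atomic(struct work_node *) work_llist; /* models ctx->work_llist.first */
};

/* combined check: any deferred work, from either source? */
static bool any_task_work_pending(struct ctx *ctx)
{
	return atomic_load(&ctx->notify_flag) ||
	       atomic_load(&ctx->work_llist) != NULL;
}

int main(void)
{
	struct ctx ctx = { .notify_flag = false, .work_llist = NULL };
	struct work_node node = { .next = NULL };

	printf("pending=%d\n", any_task_work_pending(&ctx)); /* 0 */
	atomic_store(&ctx.work_llist, &node);                 /* push deferred work */
	printf("pending=%d\n", any_task_work_pending(&ctx)); /* 1 */
	return 0;
}

Note that the kernel version reads the llist head through the
wq_list_empty() macro, which works here because the macro only
dereferences ->first; the sketch above makes that load explicit.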