io_uring: avoid normal tw intermediate fallback

When a DEFER_TASKRUN io_uring is terminating, it requeues deferred task
work items as normal tw, which can then fall back once more to kthread
execution. Avoid this intermediate step and always push them straight
to the fallback kthread.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d1cd472cec2230c66bd1c8d412a5833f0af75384.1730772720.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
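
For context, the per-request fallback machinery itself is untouched: the
loop body elided between the first two hunks below keeps pushing each
request onto its ring's fallback list and kicking the fallback worker. A
simplified sketch of that pre-existing loop, paraphrased from mainline
io_uring of this era (not part of this patch; the sync flushing of
fallback_work is omitted and details vary between versions):

	/*
	 * Hand every queued request to its ring's fallback worker. The
	 * first add to an empty fallback_llist schedules the work item,
	 * so requests still get completed without a usable task context.
	 */
	while (node) {
		req = container_of(node, struct io_kiocb, io_task_work.node);
		node = node->next;
		if (llist_add(&req->io_task_work.node,
			      &req->ctx->fallback_llist))
			schedule_delayed_work(&req->ctx->fallback_work, 1);
	}

Making __io_fallback_tw() take a bare llist lets
io_move_task_work_from_local() feed the ring's local work list directly
into this path, instead of round-tripping every request through
io_req_normal_work_add() only for it to fail task_work_add() and fall
back anyway.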
Pavel Begunkov, 2024-11-05 02:12:33 +0000; committed by Jens Axboe
parent 6bf90bd8c5
commit af0a2ffef0
2 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1066,9 +1066,8 @@ struct llist_node *io_handle_tw_list(struct llist_node *node,
 	return node;
 }
 
-static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
+static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
 {
-	struct llist_node *node = llist_del_all(&tctx->task_list);
 	struct io_ring_ctx *last_ctx = NULL;
 	struct io_kiocb *req;
 
@@ -1094,6 +1093,13 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
 	}
 }
 
+static void io_fallback_tw(struct io_uring_task *tctx, bool sync)
+{
+	struct llist_node *node = llist_del_all(&tctx->task_list);
+
+	__io_fallback_tw(node, sync);
+}
+
 struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
 				      unsigned int max_entries,
 				      unsigned int *count)
@@ -1247,16 +1253,9 @@ void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
 
 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
 {
-	struct llist_node *node;
+	struct llist_node *node = llist_del_all(&ctx->work_llist);
 
-	node = llist_del_all(&ctx->work_llist);
-	while (node) {
-		struct io_kiocb *req = container_of(node, struct io_kiocb,
-						    io_task_work.node);
-
-		node = node->next;
-		io_req_normal_work_add(req);
-	}
+	__io_fallback_tw(node, false);
 }
 
 static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,

diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -136,7 +136,7 @@ static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
 		 * Not from an SQE, as those cannot be submitted, but via
 		 * updating tagged resources.
 		 */
-		if (ctx->submitter_task->flags & PF_EXITING)
+		if (percpu_ref_is_dying(&ctx->refs))
 			lockdep_assert(current_work());
 		else
 			lockdep_assert(current == ctx->submitter_task);
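
A note on the io_uring.h hunk: with dying rings now pushing local work
straight to the fallback worker, CQEs can be posted from a workqueue
even while the submitter task is alive, so gating the current_work()
assertion on the submitter's PF_EXITING no longer covers every fallback
case; whether the ring's refs are dying does. For illustration, a sketch
of the fallback worker as it looks in mainline around this time (not
part of this patch; details vary between versions), which takes the
completion-protecting uring_lock from workqueue context:

	static __cold void io_fallback_req_func(struct work_struct *work)
	{
		struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
						       fallback_work.work);
		struct llist_node *node = llist_del_all(&ctx->fallback_llist);
		struct io_kiocb *req, *tmp;
		struct io_tw_state ts = {};

		/* runs from a workqueue worker: current_work() != NULL */
		percpu_ref_get(&ctx->refs);
		mutex_lock(&ctx->uring_lock);
		llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
			req->io_task_work.func(req, &ts);
		io_submit_flush_completions(ctx);
		mutex_unlock(&ctx->uring_lock);
		percpu_ref_put(&ctx->refs);
	}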