io_uring: clean up cqe trace points
We have too many helpers posting CQEs. Instead of tracing completion events before filling in a CQE, and thus having to pass all the data to the trace helper, set the CQE first, pass it to the tracing helper, and let it extract everything it needs.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/b83c1ca9ee5aed2df0f3bb743bf5ed699cce4c86.1729267437.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in: commit 2946f08ae9 (parent 9b296c625a)
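In concrete terms, the old trace_io_uring_complete() took ctx, req, user_data, res, cflags, extra1 and extra2 as separate arguments; the new one takes ctx, req and the already-filled struct io_uring_cqe and reads the fields itself, only reporting big_cqe[] when the ring was set up with IORING_SETUP_CQE32. Below is a minimal standalone sketch of that calling-convention change (not kernel code: "struct cqe" and the two trace_complete_* helpers are simplified stand-ins for struct io_uring_cqe and the real tracepoint):

/* Standalone illustration only; the names here are stand-ins, not kernel APIs. */
#include <stdint.h>
#include <stdio.h>

struct cqe {				/* stand-in for struct io_uring_cqe */
	uint64_t user_data;
	int32_t  res;
	uint32_t flags;
	uint64_t big_cqe[2];		/* extra data, only meaningful on CQE32 rings */
};

/* Old convention: every field is passed to the trace helper separately. */
static void trace_complete_old(uint64_t user_data, int res, unsigned cflags,
			       uint64_t extra1, uint64_t extra2)
{
	printf("old: user_data=%llu res=%d cflags=%u extra=%llu/%llu\n",
	       (unsigned long long)user_data, res, cflags,
	       (unsigned long long)extra1, (unsigned long long)extra2);
}

/* New convention: the helper receives the filled-in CQE and extracts what it needs. */
static void trace_complete_new(const struct cqe *cqe, int cqe32)
{
	printf("new: user_data=%llu res=%d cflags=%u extra=%llu/%llu\n",
	       (unsigned long long)cqe->user_data, cqe->res, cqe->flags,
	       (unsigned long long)(cqe32 ? cqe->big_cqe[0] : 0),
	       (unsigned long long)(cqe32 ? cqe->big_cqe[1] : 0));
}

int main(void)
{
	struct cqe cqe = { .user_data = 42, .res = 0, .flags = 0,
			   .big_cqe = { 1, 2 } };

	/* Before: each value handed to the trace helper individually. */
	trace_complete_old(cqe.user_data, cqe.res, cqe.flags,
			   cqe.big_cqe[0], cqe.big_cqe[1]);
	/* After: fill the CQE first, then pass the whole thing to the tracepoint. */
	trace_complete_new(&cqe, /*cqe32=*/1);
	return 0;
}

The diff below makes the same move in the real tracepoint and its callers: io_fill_cqe_aux() and io_fill_cqe_req() now write or copy the CQE first and only then call trace_io_uring_complete() with it.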
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -662,4 +662,9 @@ struct io_overflow_cqe {
 	struct io_uring_cqe cqe;
 };
 
+static inline bool io_ctx_cqe32(struct io_ring_ctx *ctx)
+{
+	return ctx->flags & IORING_SETUP_CQE32;
+}
+
 #endif
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -315,20 +315,14 @@ TRACE_EVENT(io_uring_fail_link,
  * io_uring_complete - called when completing an SQE
  *
  * @ctx:		pointer to a ring context structure
- * @req:		pointer to a submitted request
- * @user_data:		user data associated with the request
- * @res:		result of the request
- * @cflags:		completion flags
- * @extra1:		extra 64-bit data for CQE32
- * @extra2:		extra 64-bit data for CQE32
- *
+ * @req:		(optional) pointer to a submitted request
+ * @cqe:		pointer to the filled in CQE being posted
  */
 TRACE_EVENT(io_uring_complete,
 
-	TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
-		 u64 extra1, u64 extra2),
+	TP_PROTO(struct io_ring_ctx *ctx, void *req, struct io_uring_cqe *cqe),
 
-	TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),
+	TP_ARGS(ctx, req, cqe),
 
 	TP_STRUCT__entry (
 		__field( void *,	ctx	)
@@ -343,11 +337,11 @@ TRACE_EVENT(io_uring_complete,
 	TP_fast_assign(
 		__entry->ctx		= ctx;
 		__entry->req		= req;
-		__entry->user_data	= user_data;
-		__entry->res		= res;
-		__entry->cflags		= cflags;
-		__entry->extra1		= extra1;
-		__entry->extra2		= extra2;
+		__entry->user_data	= cqe->user_data;
+		__entry->res		= cqe->res;
+		__entry->cflags		= cqe->flags;
+		__entry->extra1		= io_ctx_cqe32(ctx) ? cqe->big_cqe[0] : 0;
+		__entry->extra2		= io_ctx_cqe32(ctx) ? cqe->big_cqe[1] : 0;
 	),
 
 	TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -828,8 +828,6 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 	 * the ring.
 	 */
 	if (likely(io_get_cqe(ctx, &cqe))) {
-		trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
-
 		WRITE_ONCE(cqe->user_data, user_data);
 		WRITE_ONCE(cqe->res, res);
 		WRITE_ONCE(cqe->flags, cflags);
@@ -838,6 +836,8 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 			WRITE_ONCE(cqe->big_cqe[0], 0);
 			WRITE_ONCE(cqe->big_cqe[1], 0);
 		}
+
+		trace_io_uring_complete(ctx, NULL, cqe);
 		return true;
 	}
 	return false;
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -189,16 +189,15 @@ static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
 	if (unlikely(!io_get_cqe(ctx, &cqe)))
 		return false;
 
-	if (trace_io_uring_complete_enabled())
-		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
-					req->cqe.res, req->cqe.flags,
-					req->big_cqe.extra1, req->big_cqe.extra2);
-
 	memcpy(cqe, &req->cqe, sizeof(*cqe));
 	if (ctx->flags & IORING_SETUP_CQE32) {
 		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
 		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
 	}
+
+	if (trace_io_uring_complete_enabled())
+		trace_io_uring_complete(req->ctx, req, cqe);
 	return true;
 }