From c9d952b9103b600ddafc5d1c0e2f2dbd30f0b805 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Sat, 5 Oct 2024 19:06:50 -0600
Subject: [PATCH 1/2] io_uring/rw: fix cflags posting for single issue multishot read

If multishot gets disabled, and hence the request will get terminated
rather than persist for more iterations, then posting the CQE with the
right cflags is still important. Most notably, the buffer reference
needs to be included.

Refactor the return of __io_read() a bit, so that the provided buffer
is always put correctly, and hence returned to the application.

Reported-by: Sharon Rosner
Link: https://github.com/axboe/liburing/issues/1257
Cc: stable@vger.kernel.org
Fixes: 2a975d426c82 ("io_uring/rw: don't allow multishot reads without NOWAIT support")
Signed-off-by: Jens Axboe
---
 io_uring/rw.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/io_uring/rw.c b/io_uring/rw.c
index f023ff49c688..93ad92605884 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -972,17 +972,21 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		if (issue_flags & IO_URING_F_MULTISHOT)
 			return IOU_ISSUE_SKIP_COMPLETE;
 		return -EAGAIN;
-	}
-
-	/*
-	 * Any successful return value will keep the multishot read armed.
-	 */
-	if (ret > 0 && req->flags & REQ_F_APOLL_MULTISHOT) {
+	} else if (ret <= 0) {
+		io_kbuf_recycle(req, issue_flags);
+		if (ret < 0)
+			req_set_fail(req);
+	} else {
 		/*
-		 * Put our buffer and post a CQE. If we fail to post a CQE, then
+		 * Any successful return value will keep the multishot read
+		 * armed, if it's still set. Put our buffer and post a CQE. If
+		 * we fail to post a CQE, or multishot is no longer set, then
 		 * jump to the termination path. This request is then done.
 		 */
 		cflags = io_put_kbuf(req, ret, issue_flags);
+		if (!(req->flags & REQ_F_APOLL_MULTISHOT))
+			goto done;
+
 		rw->len = 0; /* similarly to above, reset len to 0 */
 
 		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
@@ -1003,6 +1007,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 	 * Either an error, or we've hit overflow posting the CQE. For any
 	 * multishot request, hitting overflow will terminate it.
 	 */
+done:
 	io_req_set_res(req, ret, cflags);
 	io_req_rw_cleanup(req, issue_flags);
 	if (issue_flags & IO_URING_F_MULTISHOT)
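
For context on what this fix protects, here is roughly what a userspace consumer of
multishot reads looks like with liburing's provided-buffer rings (a minimal sketch, not
part of the patch series; BGID, BUF_SIZE, the stdin fd, and the lack of buffer recycling
and error handling are illustrative shortcuts). The case addressed above is the
terminating CQE: even when IORING_CQE_F_MORE is cleared, cqe->flags must still carry the
buffer ID in the IORING_CQE_F_BUFFER bits, or the application can never return that
buffer to the ring — which is the leak reported in the liburing issue linked above.

/* Sketch: consume multishot read CQEs from a provided-buffer ring (liburing >= 2.6). */
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>

#define BGID     1	/* provided-buffer group id (arbitrary) */
#define BUFS     8	/* must be a power of two for a buf_ring */
#define BUF_SIZE 4096

int main(void)
{
	struct io_uring ring;
	struct io_uring_buf_ring *br;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char *bufs;
	int i, ret, done;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	/* register a ring of provided buffers for group BGID */
	br = io_uring_setup_buf_ring(&ring, BUFS, BGID, 0, &ret);
	if (!br)
		return 1;
	bufs = malloc(BUFS * BUF_SIZE);
	for (i = 0; i < BUFS; i++)
		io_uring_buf_ring_add(br, bufs + i * BUF_SIZE, BUF_SIZE, i,
				      io_uring_buf_ring_mask(BUFS), i);
	io_uring_buf_ring_advance(br, BUFS);

	/* one SQE, many CQEs: each completion picks a buffer from group BGID */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read_multishot(sqe, /* fd */ 0, 0, 0, BGID);
	io_uring_submit(&ring);

	for (;;) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		if (cqe->res > 0 && (cqe->flags & IORING_CQE_F_BUFFER)) {
			unsigned bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;

			printf("got %d bytes in buffer %u\n", cqe->res, bid);
			/*
			 * Real code would re-add buffer 'bid' to the ring here;
			 * this sketch doesn't recycle, so it stalls once all
			 * BUFS buffers are consumed.
			 */
		}
		/* no F_MORE: the multishot read terminated; this final CQE
		 * must still have delivered the buffer ID checked above */
		done = !(cqe->flags & IORING_CQE_F_MORE);
		io_uring_cqe_seen(&ring, cqe);
		if (done)
			break;
	}
	io_uring_queue_exit(&ring);
	return 0;
}
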
From f7c9134385331c5ef36252895130aa01a92de907 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Sun, 6 Oct 2024 10:40:36 -0600
Subject: [PATCH 2/2] io_uring/rw: allow pollable non-blocking attempts for !FMODE_NOWAIT

The checking for whether or not io_uring can do a non-blocking read or
write attempt is gated on FMODE_NOWAIT. However, if the file is
pollable, it's feasible to just check if it's currently in a state in
which it can sanely receive or send _some_ data.

This avoids unnecessary io-wq punts, and repeated worthless retries
before doing that punt, by assuming that some data can get delivered
or received if poll tells us that is true. It also allows multishot
reads to properly work with these types of files, enabling a bit of a
cleanup of the logic that:

c9d952b9103b ("io_uring/rw: fix cflags posting for single issue multishot read")

had to put in place.

Signed-off-by: Jens Axboe
---
 io_uring/rw.c | 33 ++++++++++++++++-----------------
 1 file changed, 16 insertions(+), 17 deletions(-)

diff --git a/io_uring/rw.c b/io_uring/rw.c
index 93ad92605884..80ae3c2ebb70 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -31,9 +31,19 @@ struct io_rw {
 	rwf_t flags;
 };
 
-static inline bool io_file_supports_nowait(struct io_kiocb *req)
+static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
 {
-	return req->flags & REQ_F_SUPPORT_NOWAIT;
+	/* If FMODE_NOWAIT is set for a file, we're golden */
+	if (req->flags & REQ_F_SUPPORT_NOWAIT)
+		return true;
+	/* No FMODE_NOWAIT, if we can poll, check the status */
+	if (io_file_can_poll(req)) {
+		struct poll_table_struct pt = { ._key = mask };
+
+		return vfs_poll(req->file, &pt) & mask;
+	}
+	/* No FMODE_NOWAIT support, and file isn't pollable. Tough luck. */
+	return false;
 }
 
 #ifdef CONFIG_COMPAT
@@ -796,8 +806,8 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
 	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
 	 * reliably. If not, or it IOCB_NOWAIT is set, don't retry.
 	 */
-	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
-	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
+	if (kiocb->ki_flags & IOCB_NOWAIT ||
+	    ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
 		req->flags |= REQ_F_NOWAIT;
 
 	if (ctx->flags & IORING_SETUP_IOPOLL) {
@@ -838,7 +848,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (force_nonblock) {
 		/* If the file doesn't support async, just async punt */
-		if (unlikely(!io_file_supports_nowait(req)))
+		if (unlikely(!io_file_supports_nowait(req, EPOLLIN)))
 			return -EAGAIN;
 		kiocb->ki_flags |= IOCB_NOWAIT;
 	} else {
@@ -951,13 +961,6 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 	ret = __io_read(req, issue_flags);
 
-	/*
-	 * If the file doesn't support proper NOWAIT, then disable multishot
-	 * and stay in single shot mode.
-	 */
-	if (!io_file_supports_nowait(req))
-		req->flags &= ~REQ_F_APOLL_MULTISHOT;
-
 	/*
 	 * If we get -EAGAIN, recycle our buffer and just let normal poll
 	 * handling arm it.
 	 */
@@ -984,9 +987,6 @@
 		 * jump to the termination path. This request is then done.
 		 */
 		cflags = io_put_kbuf(req, ret, issue_flags);
-		if (!(req->flags & REQ_F_APOLL_MULTISHOT))
-			goto done;
-
 		rw->len = 0; /* similarly to above, reset len to 0 */
 
 		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
@@ -1007,7 +1007,6 @@
 	 * Either an error, or we've hit overflow posting the CQE. For any
 	 * multishot request, hitting overflow will terminate it.
 	 */
-done:
 	io_req_set_res(req, ret, cflags);
 	io_req_rw_cleanup(req, issue_flags);
 	if (issue_flags & IO_URING_F_MULTISHOT)
@@ -1031,7 +1030,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (force_nonblock) {
 		/* If the file doesn't support async, just async punt */
-		if (unlikely(!io_file_supports_nowait(req)))
+		if (unlikely(!io_file_supports_nowait(req, EPOLLOUT)))
 			goto ret_eagain;
 
 		/* Check if we can support NOWAIT. */
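
The reworked io_file_supports_nowait() is easiest to see by analogy with plain poll(2)
in userspace (a sketch under that analogy; can_attempt_nowait() is a hypothetical helper,
not kernel code): a zero-timeout poll asks whether some data could move right now, which
is the same question the kernel-side vfs_poll() check answers for pollable files that
lack FMODE_NOWAIT.

/* Userspace analogue of the vfs_poll() fallback (illustrative only). */
#include <poll.h>
#include <stdbool.h>

/* Ask "could a non-blocking attempt move some data right now?" for this fd. */
static bool can_attempt_nowait(int fd, short mask)
{
	struct pollfd pfd = { .fd = fd, .events = mask };

	/* Zero timeout: a pure readiness check, never blocks. */
	return poll(&pfd, 1, 0) == 1 && (pfd.revents & mask);
}

A read path would gate on can_attempt_nowait(fd, POLLIN) and a write path on POLLOUT,
mirroring the EPOLLIN and EPOLLOUT masks the patch passes to io_file_supports_nowait().
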