io_uring-6.12-20241011

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmcJO4oQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpit7D/43gTwIyq8ftiSKGOVwQqPGx37D3rQprl7/
 OuwLS0UgkngOAsimqmi12oyeBqKTfhnXNFgez1IHmE93ns1x07rc1KLc8esXzr0C
 lX/qUSWqrU9LIUEXJNjhCf4aXhpXFBOQoOf3p0pfL3VcQhm6hW6jVlOTEoF15QuC
 fliRVTfSduAaKojn8k/YJzPYFKN+AUgd+Ysa0tg7tuM1+UUKdXcEpeOc0JV6N2XL
 6W5AeKuSp+TOO+oYED4NA4WE1hHl2T8/T3DEwHAm4SLijWqE7GYl+cP0Nawiwwu8
 3UVF23kKvpqCD9vFqJoJ1QxTIEI8Js+ROZv+SH6x70fkRZGXJuV4kDGWIi5dBGDG
 AB+lEtQhezlVjlvMYNR2oygaR87rQWNyEB0atlWktFWz5P+rV9Zr8whgZtmnLVMH
 60ibHXzxzFEyMiHwJ8k7OAhG7O0cALN3SCnlLlqfW3FaCN0pHNAe50GBpzQmBPPy
 5r8Qj31rd1hIYB2JiU+F+5TIHREBUNw9Q6pgMw75xm3GNWkACFueNegeBWK+e5Uc
 JKWguIPiNLRfOWDkSVPGad84mb+IVaZOVepPNo338i9QHdrSMxewXPG53JxvYoMl
 +gwK1Y0E5cH6gEgfw5XvTTI+xBCHaNtMokQVwBrglrijwhU2iDdF4DFAOs6OIOkt
 aN/FrRBlHQ==
 =sE7Y
 -----END PGP SIGNATURE-----

Merge tag 'io_uring-6.12-20241011' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:

 - Explicitly have a mshot_finished condition for IORING_OP_RECV in
   multishot mode, similarly to what IORING_OP_RECVMSG has. This doesn't
   fix a bug right now, but it makes it harder to actually have a bug
   here if a request takes multiple iterations to finish.

 - Fix handling of read/write retries for !FMODE_NOWAIT files. If they
   are pollable, that's all we need (a userspace sketch of the affected
   case follows this list).
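
A minimal liburing sketch of the case the second fix targets: a plain read
submitted against a pollable descriptor that may not have FMODE_NOWAIT set,
which the kernel can now attempt non-blocking (probing readiness via
vfs_poll) rather than always punting to io-wq. The /dev/tty path, queue
depth, and error handling below are illustrative assumptions, not taken
from these commits.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[256];
	int fd;

	/* a tty is one example of a pollable file; any readable fd works */
	fd = open("/dev/tty", O_RDONLY);
	if (fd < 0)
		return 1;
	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* queue a single IORING_OP_READ and submit it */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
	io_uring_submit(&ring);

	/*
	 * With the fix, a pollable file lacking FMODE_NOWAIT still gets a
	 * non-blocking attempt inline instead of an unconditional punt.
	 */
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("read result: %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}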

* tag 'io_uring-6.12-20241011' of git://git.kernel.dk/linux:
  io_uring/rw: allow pollable non-blocking attempts for !FMODE_NOWAIT
  io_uring/rw: fix cflags posting for single issue multishot read
commit 9e4c6c1ad9
Linus Torvalds, 2024-10-11 12:00:21 -07:00

io_uring/rw.c:

@@ -31,9 +31,19 @@ struct io_rw {
 	rwf_t				flags;
 };
 
-static inline bool io_file_supports_nowait(struct io_kiocb *req)
+static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
 {
-	return req->flags & REQ_F_SUPPORT_NOWAIT;
+	/* If FMODE_NOWAIT is set for a file, we're golden */
+	if (req->flags & REQ_F_SUPPORT_NOWAIT)
+		return true;
+	/* No FMODE_NOWAIT, if we can poll, check the status */
+	if (io_file_can_poll(req)) {
+		struct poll_table_struct pt = { ._key = mask };
+
+		return vfs_poll(req->file, &pt) & mask;
+	}
+	/* No FMODE_NOWAIT support, and file isn't pollable. Tough luck. */
+	return false;
 }
 
 #ifdef CONFIG_COMPAT
@@ -796,8 +806,8 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
 	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
 	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
 	 */
-	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
-	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
+	if (kiocb->ki_flags & IOCB_NOWAIT ||
+	    ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
 		req->flags |= REQ_F_NOWAIT;
 
 	if (ctx->flags & IORING_SETUP_IOPOLL) {
@@ -838,7 +848,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (force_nonblock) {
 		/* If the file doesn't support async, just async punt */
-		if (unlikely(!io_file_supports_nowait(req)))
+		if (unlikely(!io_file_supports_nowait(req, EPOLLIN)))
 			return -EAGAIN;
 		kiocb->ki_flags |= IOCB_NOWAIT;
 	} else {
@@ -951,13 +961,6 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 	ret = __io_read(req, issue_flags);
 
-	/*
-	 * If the file doesn't support proper NOWAIT, then disable multishot
-	 * and stay in single shot mode.
-	 */
-	if (!io_file_supports_nowait(req))
-		req->flags &= ~REQ_F_APOLL_MULTISHOT;
-
 	/*
 	 * If we get -EAGAIN, recycle our buffer and just let normal poll
 	 * handling arm it.
 	 */
@@ -972,14 +975,15 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		if (issue_flags & IO_URING_F_MULTISHOT)
 			return IOU_ISSUE_SKIP_COMPLETE;
 		return -EAGAIN;
-	}
-
-	/*
-	 * Any successful return value will keep the multishot read armed.
-	 */
-	if (ret > 0 && req->flags & REQ_F_APOLL_MULTISHOT) {
+	} else if (ret <= 0) {
+		io_kbuf_recycle(req, issue_flags);
+		if (ret < 0)
+			req_set_fail(req);
+	} else {
 		/*
-		 * Put our buffer and post a CQE. If we fail to post a CQE, then
+		 * Any successful return value will keep the multishot read
+		 * armed, if it's still set. Put our buffer and post a CQE. If
+		 * we fail to post a CQE, or multishot is no longer set, then
 		 * jump to the termination path. This request is then done.
 		 */
 		cflags = io_put_kbuf(req, ret, issue_flags);
@@ -1026,7 +1030,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (force_nonblock) {
 		/* If the file doesn't support async, just async punt */
-		if (unlikely(!io_file_supports_nowait(req)))
+		if (unlikely(!io_file_supports_nowait(req, EPOLLOUT)))
			goto ret_eagain;
 
 		/* Check if we can support NOWAIT. */