io_uring/net: switch io_recv() to using io_async_msghdr
No functional changes in this patch, just in preparation for carrying more
state than what is available now, if necessary.

Signed-off-by: Jens Axboe <axboe@kernel.dk>

commit 4a3223f7bf
parent 54cdcca05a
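The shape of the change: io_recv() previously built a struct msghdr on the
stack and threw it away on every issue, so nothing could survive an -EAGAIN.
Routing everything through struct io_async_msghdr means a retry can pick up
exactly the state the first attempt left behind. Below is a minimal sketch of
that pick-up-or-initialize pattern in plain C; the request/carried_state types
and get_state() helper are hypothetical stand-ins for io_kiocb,
io_async_msghdr and the req_has_async_data() check, not kernel API:

/* Illustrative sketch only; all names here are hypothetical stand-ins. */
#include <stddef.h>

struct carried_state {
        int inq;                /* stands in for msg.msg_inq */
        size_t consumed;        /* progress a retry must not lose */
};

struct request {
        struct carried_state *async_data;       /* set once the op went async */
        struct carried_state stack_state;       /* first-issue scratch space */
};

static struct carried_state *get_state(struct request *req)
{
        /*
         * Mirrors the io_recv() change: reuse previously saved state if
         * the request already went async, else initialize a fresh copy
         * on the stack. Either way, the rest of the function sees one
         * state object that can be persisted before returning -EAGAIN.
         */
        if (req->async_data)
                return req->async_data;
        req->stack_state.inq = -1;
        req->stack_state.consumed = 0;
        return &req->stack_state;
}

With that in mind, the diff below is mostly mechanical: msg.* becomes
kmsg->msg.*, and every bare -EAGAIN return becomes a call that saves state
first (see the second sketch after the diff).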
diff --git a/io_uring/net.c b/io_uring/net.c
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -320,7 +320,7 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
 	return ret;
 }
 
-int io_send_prep_async(struct io_kiocb *req)
+int io_sendrecv_prep_async(struct io_kiocb *req)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct io_async_msghdr *io;
@@ -703,13 +703,13 @@ static inline void io_recv_prep_retry(struct io_kiocb *req)
  * again (for multishot).
  */
 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
-				  struct msghdr *msg, bool mshot_finished,
-				  unsigned issue_flags)
+				  struct io_async_msghdr *kmsg,
+				  bool mshot_finished, unsigned issue_flags)
 {
 	unsigned int cflags;
 
 	cflags = io_put_kbuf(req, issue_flags);
-	if (msg->msg_inq > 0)
+	if (kmsg->msg.msg_inq > 0)
 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
 	/*
@@ -723,7 +723,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 
 		io_recv_prep_retry(req);
 		/* Known not-empty or unknown state, retry */
-		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq < 0) {
+		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
 			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
 				return false;
 			/* mshot retries exceeded, force a requeue */
@@ -924,7 +924,7 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	else
 		io_kbuf_recycle(req, issue_flags);
 
-	if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
+	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
 		goto retry_multishot;
 
 	if (mshot_finished)
@@ -938,29 +938,42 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct msghdr msg;
+	struct io_async_msghdr iomsg, *kmsg;
 	struct socket *sock;
 	unsigned flags;
 	int ret, min_ret = 0;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 	size_t len = sr->len;
 
+	if (req_has_async_data(req)) {
+		kmsg = req->async_data;
+	} else {
+		kmsg = &iomsg;
+		kmsg->free_iov = NULL;
+		kmsg->msg.msg_name = NULL;
+		kmsg->msg.msg_namelen = 0;
+		kmsg->msg.msg_control = NULL;
+		kmsg->msg.msg_get_inq = 1;
+		kmsg->msg.msg_controllen = 0;
+		kmsg->msg.msg_iocb = NULL;
+		kmsg->msg.msg_ubuf = NULL;
+
+		if (!io_do_buffer_select(req)) {
+			ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
+					  &kmsg->msg.msg_iter);
+			if (unlikely(ret))
+				return ret;
+		}
+	}
+
 	if (!(req->flags & REQ_F_POLLED) &&
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
-		return -EAGAIN;
+		return io_setup_async_msg(req, kmsg, issue_flags);
 
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
 		return -ENOTSOCK;
 
-	msg.msg_name = NULL;
-	msg.msg_namelen = 0;
-	msg.msg_control = NULL;
-	msg.msg_get_inq = 1;
-	msg.msg_controllen = 0;
-	msg.msg_iocb = NULL;
-	msg.msg_ubuf = NULL;
-
 	flags = sr->msg_flags;
 	if (force_nonblock)
 		flags |= MSG_DONTWAIT;
@@ -974,22 +987,23 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 			return -ENOBUFS;
 		sr->buf = buf;
 		sr->len = len;
+		ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
+				  &kmsg->msg.msg_iter);
+		if (unlikely(ret))
+			goto out_free;
 	}
 
-	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
-	if (unlikely(ret))
-		goto out_free;
-
-	msg.msg_inq = -1;
-	msg.msg_flags = 0;
+	kmsg->msg.msg_inq = -1;
+	kmsg->msg.msg_flags = 0;
 
 	if (flags & MSG_WAITALL)
-		min_ret = iov_iter_count(&msg.msg_iter);
+		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 
-	ret = sock_recvmsg(sock, &msg, flags);
+	ret = sock_recvmsg(sock, &kmsg->msg, flags);
 	if (ret < min_ret) {
 		if (ret == -EAGAIN && force_nonblock) {
-			if (issue_flags & IO_URING_F_MULTISHOT) {
+			ret = io_setup_async_msg(req, kmsg, issue_flags);
+			if (ret == -EAGAIN && issue_flags & IO_URING_F_MULTISHOT) {
 				io_kbuf_recycle(req, issue_flags);
 				return IOU_ISSUE_SKIP_COMPLETE;
 			}
@@ -1001,12 +1015,12 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 			sr->buf += ret;
 			sr->done_io += ret;
 			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return -EAGAIN;
+			return io_setup_async_msg(req, kmsg, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
 		req_set_fail(req);
-	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
+	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
 out_free:
 		req_set_fail(req);
 	}
@@ -1018,9 +1032,14 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	else
 		io_kbuf_recycle(req, issue_flags);
 
-	if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
+	if (!io_recv_finish(req, &ret, kmsg, ret <= 0, issue_flags))
 		goto retry_multishot;
 
+	if (ret == -EAGAIN)
+		return io_setup_async_msg(req, kmsg, issue_flags);
+	else if (ret != IOU_OK && ret != IOU_STOP_MULTISHOT)
+		io_req_msg_cleanup(req, kmsg, issue_flags);
+
 	return ret;
 }
 
diff --git a/io_uring/net.h b/io_uring/net.h
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -40,7 +40,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
 
 int io_send(struct io_kiocb *req, unsigned int issue_flags);
-int io_send_prep_async(struct io_kiocb *req);
+int io_sendrecv_prep_async(struct io_kiocb *req);
 
 int io_recvmsg_prep_async(struct io_kiocb *req);
 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -604,13 +604,16 @@ const struct io_cold_def io_cold_defs[] = {
 		.async_size		= sizeof(struct io_async_msghdr),
 		.cleanup		= io_sendmsg_recvmsg_cleanup,
 		.fail			= io_sendrecv_fail,
-		.prep_async		= io_send_prep_async,
+		.prep_async		= io_sendrecv_prep_async,
 #endif
 	},
 	[IORING_OP_RECV] = {
 		.name			= "RECV",
 #if defined(CONFIG_NET)
+		.async_size		= sizeof(struct io_async_msghdr),
+		.cleanup		= io_sendmsg_recvmsg_cleanup,
 		.fail			= io_sendrecv_fail,
+		.prep_async		= io_sendrecv_prep_async,
 #endif
 	},
 	[IORING_OP_OPENAT2] = {
@@ -687,7 +690,7 @@ const struct io_cold_def io_cold_defs[] = {
 		.name			= "SEND_ZC",
 #if defined(CONFIG_NET)
 		.async_size		= sizeof(struct io_async_msghdr),
-		.prep_async		= io_send_prep_async,
+		.prep_async		= io_sendrecv_prep_async,
 		.cleanup		= io_send_zc_cleanup,
 		.fail			= io_sendrecv_fail,
 #endif
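A note on the recurring conversion of bare "return -EAGAIN;" into "return
io_setup_async_msg(req, kmsg, issue_flags);": this is the save side of the
pattern sketched above. Before bouncing the request back for a later retry,
the on-stack header is persisted as the request's async data. A hedged sketch
of that save step, again with hypothetical names (setup_async(), op,
op_state) rather than the real io_uring helpers:

/* Illustrative only: setup_async() and the op/op_state types are
 * hypothetical stand-ins for io_setup_async_msg() and its arguments. */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct op_state {
        int inq;        /* e.g. a msg_inq snapshot worth keeping */
};

struct op {
        struct op_state *async_data;
};

static int setup_async(struct op *op, struct op_state *st)
{
        /* First time going async: persist the caller's on-stack state. */
        if (!op->async_data) {
                op->async_data = malloc(sizeof(*st));
                if (!op->async_data)
                        return -ENOMEM;
                memcpy(op->async_data, st, sizeof(*st));
        }
        return -EAGAIN; /* caller re-queues; the state now survives */
}

On the next issue, the pick-up path from the first sketch finds async_data
already set and resumes with the saved state instead of reinitializing.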