Mirror of https://github.com/torvalds/linux.git
io_uring/kbuf: pass in 'len' argument for buffer commit
In preparation for needing the consumed length, pass in the length being completed. Unused right now, but will be used when it is possible to partially consume a buffer.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 641a681679
commit 6733e678ba
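For orientation, here is a minimal user-space sketch of the idea the commit message describes: the buffer-commit step now receives the number of bytes actually completed, so a later change can retire only part of a provided buffer instead of always consuming it whole. This is not io_uring code; struct buf, struct buf_ring and kbuf_commit() below are invented purely for illustration.

/* Illustrative user-space model only -- not kernel code. */
#include <stdio.h>

struct buf {
        unsigned int total;     /* size of the provided buffer */
        unsigned int used;      /* bytes consumed so far */
};

struct buf_ring {
        struct buf bufs[4];
        unsigned int head;      /* next buffer to hand out */
};

/*
 * Commit 'nr' buffers after completing 'len' bytes. Today the length is
 * carried but otherwise unused: the head always advances by whole buffers.
 * A later partial-consumption mode could leave a buffer on the ring when
 * len < total, which is why the length is being plumbed through now.
 */
static void kbuf_commit(struct buf_ring *br, int len, int nr)
{
        while (nr--) {
                struct buf *b = &br->bufs[br->head & 3];
                unsigned int take = len > 0 ? (unsigned int)len : 0;

                if (take > b->total - b->used)
                        take = b->total - b->used;
                b->used += take;
                len -= (int)take;
                br->head++;     /* whole-buffer retirement, as today */
        }
}

int main(void)
{
        struct buf_ring br = { .bufs = { { 4096, 0 }, { 4096, 0 } } };

        /* a short receive: only 100 of the 4096 bytes were filled */
        kbuf_commit(&br, 100, 1);
        printf("head=%u, buffer 0 used=%u of %u\n",
               br.head, br.bufs[0].used, br.bufs[0].total);
        return 0;
}

In the hunks below the same shape appears as the extra length argument (len, *ret, ret, or req->cqe.res) threaded through io_put_kbuf(), io_put_kbufs(), __io_put_kbuf_list() and io_kbuf_commit().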
@@ -904,7 +904,7 @@ void io_req_defer_failed(struct io_kiocb *req, s32 res)
 	lockdep_assert_held(&req->ctx->uring_lock);

 	req_set_fail(req);
-	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
+	io_req_set_res(req, res, io_put_kbuf(req, res, IO_URING_F_UNLOCKED));
 	if (def->fail)
 		def->fail(req);
 	io_req_complete_defer(req);
@@ -70,7 +70,7 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 	return true;
 }

-void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
+void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags)
 {
 	/*
 	 * We can add this buffer back to two lists:
@@ -88,12 +88,12 @@ void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
 		struct io_ring_ctx *ctx = req->ctx;

 		spin_lock(&ctx->completion_lock);
-		__io_put_kbuf_list(req, &ctx->io_buffers_comp);
+		__io_put_kbuf_list(req, len, &ctx->io_buffers_comp);
 		spin_unlock(&ctx->completion_lock);
 	} else {
 		lockdep_assert_held(&req->ctx->uring_lock);

-		__io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
+		__io_put_kbuf_list(req, len, &req->ctx->io_buffers_cache);
 	}
 }

@@ -165,7 +165,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 		 * the transfer completes (or if we get -EAGAIN and must poll of
 		 * retry).
 		 */
-		io_kbuf_commit(req, bl, 1);
+		io_kbuf_commit(req, bl, *len, 1);
 		req->buf_list = NULL;
 	}
 	return u64_to_user_ptr(buf->addr);
@@ -291,7 +291,7 @@ int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
 		 */
 		if (ret > 0) {
 			req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
-			io_kbuf_commit(req, bl, ret);
+			io_kbuf_commit(req, bl, arg->out_len, ret);
 		}
 	} else {
 		ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
@@ -77,7 +77,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

-void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
+void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags);

 bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

@@ -125,7 +125,7 @@ static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 #define io_ring_head_to_buf(br, head, mask)	&(br)->bufs[(head) & (mask)]

 static inline void io_kbuf_commit(struct io_kiocb *req,
-				  struct io_buffer_list *bl, int nr)
+				  struct io_buffer_list *bl, int len, int nr)
 {
 	if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
 		return;
@@ -133,22 +133,22 @@ static inline void io_kbuf_commit(struct io_kiocb *req,
 	req->flags &= ~REQ_F_BUFFERS_COMMIT;
 }

-static inline void __io_put_kbuf_ring(struct io_kiocb *req, int nr)
+static inline void __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
 {
 	struct io_buffer_list *bl = req->buf_list;

 	if (bl) {
-		io_kbuf_commit(req, bl, nr);
+		io_kbuf_commit(req, bl, len, nr);
 		req->buf_index = bl->bgid;
 	}
 	req->flags &= ~REQ_F_BUFFER_RING;
 }

-static inline void __io_put_kbuf_list(struct io_kiocb *req,
+static inline void __io_put_kbuf_list(struct io_kiocb *req, int len,
 				      struct list_head *list)
 {
 	if (req->flags & REQ_F_BUFFER_RING) {
-		__io_put_kbuf_ring(req, 1);
+		__io_put_kbuf_ring(req, len, 1);
 	} else {
 		req->buf_index = req->kbuf->bgid;
 		list_add(&req->kbuf->list, list);
@@ -163,11 +163,12 @@ static inline void io_kbuf_drop(struct io_kiocb *req)
 	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
 		return;

-	__io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
+	/* len == 0 is fine here, non-ring will always drop all of it */
+	__io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
 }

-static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int nbufs,
-					  unsigned issue_flags)
+static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len,
+					  int nbufs, unsigned issue_flags)
 {
 	unsigned int ret;

@@ -176,21 +177,21 @@ static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int nbufs,

 	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
 	if (req->flags & REQ_F_BUFFER_RING)
-		__io_put_kbuf_ring(req, nbufs);
+		__io_put_kbuf_ring(req, len, nbufs);
 	else
-		__io_put_kbuf(req, issue_flags);
+		__io_put_kbuf(req, len, issue_flags);
 	return ret;
 }

-static inline unsigned int io_put_kbuf(struct io_kiocb *req,
+static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
 				       unsigned issue_flags)
 {
-	return __io_put_kbufs(req, 1, issue_flags);
+	return __io_put_kbufs(req, len, 1, issue_flags);
 }

-static inline unsigned int io_put_kbufs(struct io_kiocb *req, int nbufs,
-					unsigned issue_flags)
+static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
+					int nbufs, unsigned issue_flags)
 {
-	return __io_put_kbufs(req, nbufs, issue_flags);
+	return __io_put_kbufs(req, len, nbufs, issue_flags);
 }
 #endif
@@ -497,11 +497,11 @@ static inline bool io_send_finish(struct io_kiocb *req, int *ret,
 	unsigned int cflags;

 	if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
-		cflags = io_put_kbuf(req, issue_flags);
+		cflags = io_put_kbuf(req, *ret, issue_flags);
 		goto finish;
 	}

-	cflags = io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret), issue_flags);
+	cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags);

 	if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
 		goto finish;
@@ -842,13 +842,13 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
-		cflags |= io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret),
+		cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret),
 				      issue_flags);
 		/* bundle with no more immediate buffers, we're done */
 		if (req->flags & REQ_F_BL_EMPTY)
 			goto finish;
 	} else {
-		cflags |= io_put_kbuf(req, issue_flags);
+		cflags |= io_put_kbuf(req, *ret, issue_flags);
 	}

 	/*
@@ -511,7 +511,7 @@ void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
 	io_req_io_end(req);

 	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
-		req->cqe.flags |= io_put_kbuf(req, 0);
+		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0);

 	io_req_rw_cleanup(req, 0);
 	io_req_task_complete(req, ts);
@@ -593,7 +593,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 		 */
 		io_req_io_end(req);
 		io_req_set_res(req, final_ret,
-			       io_put_kbuf(req, issue_flags));
+			       io_put_kbuf(req, ret, issue_flags));
 		io_req_rw_cleanup(req, issue_flags);
 		return IOU_OK;
 	}
@@ -975,7 +975,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		 * Put our buffer and post a CQE. If we fail to post a CQE, then
 		 * jump to the termination path. This request is then done.
 		 */
-		cflags = io_put_kbuf(req, issue_flags);
+		cflags = io_put_kbuf(req, ret, issue_flags);
 		rw->len = 0; /* similarly to above, reset len to 0 */

 		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
@@ -1167,7 +1167,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		if (!smp_load_acquire(&req->iopoll_completed))
 			break;
 		nr_events++;
-		req->cqe.flags = io_put_kbuf(req, 0);
+		req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0);
 		if (req->opcode != IORING_OP_URING_CMD)
 			io_req_rw_cleanup(req, 0);
 	}