2022-05-26 02:36:47 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/file.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/namei.h>
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API are:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
#include <linux/nospec.h>
|
2022-05-26 02:36:47 +00:00
|
|
|
#include <linux/io_uring.h>
|
|
|
|
|
|
|
|
#include <uapi/linux/io_uring.h>
|
|
|
|
|
|
|
|
#include "io_uring.h"
|
|
|
|
#include "tctx.h"
|
|
|
|
#include "poll.h"
|
|
|
|
#include "timeout.h"
|
2023-07-10 22:14:37 +00:00
|
|
|
#include "waitid.h"
|
io_uring: add support for futex wake and wait
Add support for FUTEX_WAKE/WAIT primitives.
IORING_OP_FUTEX_WAKE is mix of FUTEX_WAKE and FUTEX_WAKE_BITSET, as
it does support passing in a bitset.
Similarly, IORING_OP_FUTEX_WAIT is a mix of FUTEX_WAIT and
FUTEX_WAIT_BITSET.
For both of them, they are using the futex2 interface.
FUTEX_WAKE is straight forward, as those can always be done directly from
the io_uring submission without needing async handling. For FUTEX_WAIT,
things are a bit more complicated. If the futex isn't ready, then we
rely on a callback via futex_queue->wake() when someone wakes up the
futex. From that callback, we queue up task_work with the original task,
which will post a CQE and wake it, if necessary.
Cancelations are supported, both from the application point-of-view,
but also to be able to cancel pending waits if the ring exits before
all events have occurred. The return value of futex_unqueue() is used
to gate who wins the potential race between cancelation and futex
wakeups. Whomever gets a 'ret == 1' return from that claims ownership
of the io_uring futex request.
This is just the barebones wait/wake support. PI or REQUEUE support is
not added at this point, unclear if we might look into that later.
Likewise, explicit timeouts are not supported either. It is expected
that users that need timeouts would do so via the usual io_uring
mechanism to do that using linked timeouts.
The SQE format is as follows:
`addr` Address of futex
`fd` futex2(2) FUTEX2_* flags
`futex_flags` io_uring specific command flags. None valid now.
`addr2` Value of futex
`addr3` Mask to wake/wait
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2023-06-08 17:57:40 +00:00
|
|
|
#include "futex.h"
|
2022-05-26 02:36:47 +00:00
|
|
|
#include "cancel.h"
|
|
|
|
|
|
|
|
/*
 * Per-request command state for IORING_OP_ASYNC_CANCEL, carved out of the
 * generic io_kiocb command area via io_kiocb_to_cmd().
 */
struct io_cancel {
	struct file *file;
	u64 addr;	/* user_data of the request(s) to cancel */
	u32 flags;	/* IORING_ASYNC_CANCEL_* flags from sqe->cancel_flags */
	s32 fd;		/* fd to match when IORING_ASYNC_CANCEL_FD is set */
	u8 opcode;	/* opcode to match when IORING_ASYNC_CANCEL_OP is set */
};
|
|
|
|
|
|
|
|
/* Mask of all flags accepted in sqe->cancel_flags / sync cancel sc.flags */
#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED | \
			 IORING_ASYNC_CANCEL_USERDATA | IORING_ASYNC_CANCEL_OP)
|
2022-05-26 02:36:47 +00:00
|
|
|
|
2023-06-23 15:00:24 +00:00
|
|
|
/*
|
|
|
|
* Returns true if the request matches the criteria outlined by 'cd'.
|
|
|
|
*/
|
|
|
|
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
|
2022-05-26 02:36:47 +00:00
|
|
|
{
|
2023-06-23 16:33:11 +00:00
|
|
|
bool match_user_data = cd->flags & IORING_ASYNC_CANCEL_USERDATA;
|
|
|
|
|
2022-05-26 02:36:47 +00:00
|
|
|
if (req->ctx != cd->ctx)
|
|
|
|
return false;
|
2023-06-23 16:33:11 +00:00
|
|
|
|
2023-06-23 16:36:43 +00:00
|
|
|
if (!(cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP)))
|
2023-06-23 16:33:11 +00:00
|
|
|
match_user_data = true;
|
|
|
|
|
|
|
|
if (cd->flags & IORING_ASYNC_CANCEL_ANY)
|
2023-06-23 15:38:26 +00:00
|
|
|
goto check_seq;
|
2023-06-23 16:33:11 +00:00
|
|
|
if (cd->flags & IORING_ASYNC_CANCEL_FD) {
|
2022-05-26 02:36:47 +00:00
|
|
|
if (req->file != cd->file)
|
|
|
|
return false;
|
|
|
|
}
|
2023-06-23 16:36:43 +00:00
|
|
|
if (cd->flags & IORING_ASYNC_CANCEL_OP) {
|
|
|
|
if (req->opcode != cd->opcode)
|
|
|
|
return false;
|
|
|
|
}
|
2023-06-23 16:33:11 +00:00
|
|
|
if (match_user_data && req->cqe.user_data != cd->data)
|
|
|
|
return false;
|
2023-06-23 15:38:26 +00:00
|
|
|
if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
|
|
|
|
check_seq:
|
2024-01-29 03:11:55 +00:00
|
|
|
if (io_cancel_match_sequence(req, cd->seq))
|
2022-05-26 02:36:47 +00:00
|
|
|
return false;
|
|
|
|
}
|
2023-06-23 15:00:24 +00:00
|
|
|
|
2022-05-26 02:36:47 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2023-06-23 15:00:24 +00:00
|
|
|
static bool io_cancel_cb(struct io_wq_work *work, void *data)
|
|
|
|
{
|
|
|
|
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
|
|
|
|
struct io_cancel_data *cd = data;
|
|
|
|
|
|
|
|
return io_cancel_req_match(req, cd);
|
|
|
|
}
|
|
|
|
|
2022-05-26 02:36:47 +00:00
|
|
|
static int io_async_cancel_one(struct io_uring_task *tctx,
|
|
|
|
struct io_cancel_data *cd)
|
|
|
|
{
|
|
|
|
enum io_wq_cancel cancel_ret;
|
|
|
|
int ret = 0;
|
|
|
|
bool all;
|
|
|
|
|
|
|
|
if (!tctx || !tctx->io_wq)
|
|
|
|
return -ENOENT;
|
|
|
|
|
|
|
|
all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
|
|
|
|
cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
|
|
|
|
switch (cancel_ret) {
|
|
|
|
case IO_WQ_CANCEL_OK:
|
|
|
|
ret = 0;
|
|
|
|
break;
|
|
|
|
case IO_WQ_CANCEL_RUNNING:
|
|
|
|
ret = -EALREADY;
|
|
|
|
break;
|
|
|
|
case IO_WQ_CANCEL_NOTFOUND:
|
|
|
|
ret = -ENOENT;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2022-06-18 15:23:54 +00:00
|
|
|
/*
 * Try to cancel the request(s) described by 'cd'. Attempts are made, in
 * order, against: the io-wq worker pool, armed poll requests, waitid
 * requests, futex waits, and finally timeouts.
 *
 * Returns 0 on successful cancelation, -EALREADY if the io-wq target was
 * already executing, or -ENOENT if no matching request was found anywhere.
 */
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	/* must be on the issuing task unless running from an io-wq worker */
	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall-through even for -EALREADY, as we may have poll armed
	 * that need unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_waitid_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_futex_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	/* timeout cancelation requires the completion lock */
	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}
|
|
|
|
|
|
|
|
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
|
|
|
{
|
2022-08-11 07:11:15 +00:00
|
|
|
struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
|
2022-05-26 02:36:47 +00:00
|
|
|
|
|
|
|
if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
|
|
|
|
return -EINVAL;
|
2023-06-23 16:36:43 +00:00
|
|
|
if (sqe->off || sqe->splice_fd_in)
|
2022-05-26 02:36:47 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
cancel->addr = READ_ONCE(sqe->addr);
|
|
|
|
cancel->flags = READ_ONCE(sqe->cancel_flags);
|
|
|
|
if (cancel->flags & ~CANCEL_FLAGS)
|
|
|
|
return -EINVAL;
|
|
|
|
if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
|
|
|
|
if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
|
|
|
|
return -EINVAL;
|
|
|
|
cancel->fd = READ_ONCE(sqe->fd);
|
|
|
|
}
|
2023-06-23 16:36:43 +00:00
|
|
|
if (cancel->flags & IORING_ASYNC_CANCEL_OP) {
|
|
|
|
if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
|
|
|
|
return -EINVAL;
|
|
|
|
cancel->opcode = READ_ONCE(sqe->len);
|
|
|
|
}
|
2022-05-26 02:36:47 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-06-18 15:23:54 +00:00
|
|
|
/*
 * Core cancel driver. First repeatedly tries the issuing task's own
 * context; if nothing (more) is found there, falls back to scanning the
 * io-wq of every task attached to the ring.
 *
 * For ALL/ANY cancels, returns the number of requests canceled (>= 0);
 * otherwise returns the result of the first definitive attempt (0,
 * -EALREADY, or -ENOENT).
 */
static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	/* fast path: the issuing task's own context */
	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		/* single-request cancel: first definitive answer wins */
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		ret = io_async_cancel_one(node->task->io_uring, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}
|
|
|
|
|
|
|
|
/*
 * Issue handler for IORING_OP_ASYNC_CANCEL. Builds the match descriptor
 * from the prepped io_cancel state, resolves the target file when matching
 * by fd, and runs the cancel machinery. Always completes the request
 * (result posted via io_req_set_res()).
 */
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
	struct io_cancel_data cd = {
		.ctx = req->ctx,
		.data = cancel->addr,
		.flags = cancel->flags,
		.opcode = cancel->opcode,
		/* new sequence so this cancel only sees earlier requests */
		.seq = atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->tctx;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		/* fixed-file lookup if the ring or the flags say so */
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
|
2022-06-16 09:22:02 +00:00
|
|
|
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API is:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
static int __io_sync_cancel(struct io_uring_task *tctx,
|
|
|
|
struct io_cancel_data *cd, int fd)
|
|
|
|
{
|
|
|
|
struct io_ring_ctx *ctx = cd->ctx;
|
|
|
|
|
|
|
|
/* fixed must be grabbed every time since we drop the uring_lock */
|
|
|
|
if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
|
|
|
|
(cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
|
2024-10-27 15:08:31 +00:00
|
|
|
struct io_rsrc_node *node;
|
|
|
|
|
|
|
|
node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
|
|
|
|
if (unlikely(!node))
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API is:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
return -EBADF;
|
2024-10-27 15:08:31 +00:00
|
|
|
cd->file = io_slot_file(node);
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API is:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
if (!cd->file)
|
|
|
|
return -EBADF;
|
|
|
|
}
|
|
|
|
|
|
|
|
return __io_async_cancel(cd, tctx, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
|
|
|
|
__must_hold(&ctx->uring_lock)
|
|
|
|
{
|
|
|
|
struct io_cancel_data cd = {
|
|
|
|
.ctx = ctx,
|
|
|
|
.seq = atomic_inc_return(&ctx->cancel_seq),
|
|
|
|
};
|
|
|
|
ktime_t timeout = KTIME_MAX;
|
|
|
|
struct io_uring_sync_cancel_reg sc;
|
2023-11-28 17:29:58 +00:00
|
|
|
struct file *file = NULL;
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API is:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
DEFINE_WAIT(wait);
|
2023-06-22 19:03:52 +00:00
|
|
|
int ret, i;
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API is:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
|
|
|
|
if (copy_from_user(&sc, arg, sizeof(sc)))
|
|
|
|
return -EFAULT;
|
|
|
|
if (sc.flags & ~CANCEL_FLAGS)
|
|
|
|
return -EINVAL;
|
2023-06-22 19:03:52 +00:00
|
|
|
for (i = 0; i < ARRAY_SIZE(sc.pad); i++)
|
|
|
|
if (sc.pad[i])
|
|
|
|
return -EINVAL;
|
|
|
|
for (i = 0; i < ARRAY_SIZE(sc.pad2); i++)
|
|
|
|
if (sc.pad2[i])
|
|
|
|
return -EINVAL;
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API is:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
|
|
|
|
cd.data = sc.addr;
|
|
|
|
cd.flags = sc.flags;
|
2023-06-22 19:03:52 +00:00
|
|
|
cd.opcode = sc.opcode;
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API is:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
|
|
|
|
/* we can grab a normal file descriptor upfront */
|
|
|
|
if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
|
|
|
|
!(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
|
2023-11-28 17:29:58 +00:00
|
|
|
file = fget(sc.fd);
|
|
|
|
if (!file)
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API is:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
return -EBADF;
|
2023-11-28 17:29:58 +00:00
|
|
|
cd.file = file;
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API is:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
|
|
|
|
|
|
|
|
/* found something, done! */
|
|
|
|
if (ret != -EALREADY)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
|
|
|
|
struct timespec64 ts = {
|
|
|
|
.tv_sec = sc.timeout.tv_sec,
|
|
|
|
.tv_nsec = sc.timeout.tv_nsec
|
|
|
|
};
|
|
|
|
|
|
|
|
timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Keep looking until we get -ENOENT. we'll get woken everytime
|
|
|
|
* every time a request completes and will retry the cancelation.
|
|
|
|
*/
|
|
|
|
do {
|
|
|
|
cd.seq = atomic_inc_return(&ctx->cancel_seq);
|
|
|
|
|
|
|
|
prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);
|
|
|
|
|
|
|
|
ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
|
|
|
|
|
2022-12-21 14:11:33 +00:00
|
|
|
mutex_unlock(&ctx->uring_lock);
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API is:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
if (ret != -EALREADY)
|
|
|
|
break;
|
|
|
|
|
2022-08-30 12:50:10 +00:00
|
|
|
ret = io_run_task_work_sig(ctx);
|
2022-12-21 14:11:33 +00:00
|
|
|
if (ret < 0)
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API is:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
break;
|
|
|
|
ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
|
|
|
|
if (!ret) {
|
|
|
|
ret = -ETIME;
|
|
|
|
break;
|
|
|
|
}
|
2022-12-21 14:11:33 +00:00
|
|
|
mutex_lock(&ctx->uring_lock);
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API is:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
} while (1);
|
|
|
|
|
|
|
|
finish_wait(&ctx->cq_wait, &wait);
|
2022-12-21 14:11:33 +00:00
|
|
|
mutex_lock(&ctx->uring_lock);
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API is:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
|
|
|
|
if (ret == -ENOENT || ret > 0)
|
|
|
|
ret = 0;
|
|
|
|
out:
|
2023-11-28 17:29:58 +00:00
|
|
|
if (file)
|
|
|
|
fput(file);
|
io_uring: add sync cancelation API through io_uring_register()
The io_uring cancelation API is async, like any other API that we expose
there. For the case of finding a request to cancel, or not finding one,
it is fully sync in that when submission returns, the CQE for both the
cancelation request and the targeted request have been posted to the
CQ ring.
However, if the targeted work is being executed by io-wq, the API can
only start the act of canceling it. This makes it difficult to use in
some circumstances, as the caller then has to wait for the CQEs to come
in and match on the same cancelation data there.
Provide a IORING_REGISTER_SYNC_CANCEL command for io_uring_register()
that does sync cancelations, always. For the io-wq case, it'll wait
for the cancelation to come in before returning. The only expected
returns from this API is:
0 Request found and canceled fine.
> 0 Requests found and canceled. Only happens if asked to
cancel multiple requests, and if the work wasn't in
progress.
-ENOENT Request not found.
-ETIME A timeout on the operation was requested, but the timeout
expired before we could cancel.
and we won't get -EALREADY via this API.
If the timeout value passed in is -1 (tv_sec and tv_nsec), then that
means that no timeout is requested. Otherwise, the timespec passed in
is the amount of time the sync cancel will wait for a successful
cancelation.
Link: https://github.com/axboe/liburing/discussions/608
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-06-18 16:00:50 +00:00
|
|
|
return ret;
|
|
|
|
}
|