block: add a rq_list type

Replace the semi-open coded request list helpers with a proper rq_list
type that mirrors the bio_list and has head and tail pointers.  Besides
better type safety, this actually allows inserting at the tail of the
list, which will be useful soon.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20241113152050.157179-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Christoph Hellwig 2024-11-13 16:20:44 +01:00 committed by Jens Axboe
parent e8225ab150
commit a3396b9999
11 changed files with 102 additions and 86 deletions
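
Since every hunk below is a mechanical substitution of the new API, it helps to see the data structure in isolation first. What follows is a minimal userspace sketch of a head/tail singly-linked list in the spirit of the new rq_list (and of bio_list, which it mirrors); struct item and the helper names are stand-ins chosen for illustration, not the kernel definitions:

#include <assert.h>
#include <stdio.h>

struct item {
	struct item *next;
	int tag;
};

struct item_list {
	struct item *head;
	struct item *tail;
};

/* O(1) append: the tail pointer is what the old open-coded helpers lacked. */
static void list_add_tail(struct item_list *l, struct item *it)
{
	it->next = NULL;
	if (l->tail)
		l->tail->next = it;
	else
		l->head = it;
	l->tail = it;
}

/* O(1) prepend, matching the old rq_list_add() behavior. */
static void list_add_head(struct item_list *l, struct item *it)
{
	it->next = l->head;
	l->head = it;
	if (!l->tail)
		l->tail = it;
}

static struct item *list_pop(struct item_list *l)
{
	struct item *it = l->head;

	if (it) {
		l->head = it->next;
		if (!l->head)
			l->tail = NULL;
		it->next = NULL;
	}
	return it;
}

int main(void)
{
	struct item a = { .tag = 1 }, b = { .tag = 2 }, c = { .tag = 3 };
	struct item_list l = { };

	list_add_tail(&l, &a);
	list_add_tail(&l, &b);
	list_add_head(&l, &c);

	/* Tail inserts preserve order, head inserts reverse it: prints 3 1 2. */
	while (l.head)
		printf("%d\n", list_pop(&l)->tag);
	assert(l.tail == NULL);
	return 0;
}

The tail pointer is the point of the exercise: the old helpers could only prepend in O(1), so requeue lists came out in reverse order unless callers juggled a separate struct request **lastp cursor, as the removed requeue_lastp variables below show.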

block/blk-core.c

@@ -1120,8 +1120,8 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
 		return;
 
 	plug->cur_ktime = 0;
-	plug->mq_list = NULL;
-	plug->cached_rq = NULL;
+	rq_list_init(&plug->mq_list);
+	rq_list_init(&plug->cached_rqs);
 	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
 	plug->rq_count = 0;
 	plug->multiple_queues = false;
@@ -1217,7 +1217,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 	 * queue for cached requests, we don't want a blocked task holding
 	 * up a queue freeze/quiesce event.
 	 */
-	if (unlikely(!rq_list_empty(plug->cached_rq)))
+	if (unlikely(!rq_list_empty(&plug->cached_rqs)))
 		blk_mq_free_plug_rqs(plug);
 
 	plug->cur_ktime = 0;

block/blk-merge.c

@@ -1179,7 +1179,7 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 	struct blk_plug *plug = current->plug;
 	struct request *rq;
 
-	if (!plug || rq_list_empty(plug->mq_list))
+	if (!plug || rq_list_empty(&plug->mq_list))
 		return false;
 
 	rq_list_for_each(&plug->mq_list, rq) {

block/blk-mq.c

@@ -478,7 +478,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
 		prefetch(tags->static_rqs[tag]);
 		tag_mask &= ~(1UL << i);
 		rq = blk_mq_rq_ctx_init(data, tags, tag);
-		rq_list_add(data->cached_rq, rq);
+		rq_list_add_head(data->cached_rqs, rq);
 		nr++;
 	}
 	if (!(data->rq_flags & RQF_SCHED_TAGS))
@@ -487,7 +487,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
 	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
 	data->nr_tags -= nr;
 
-	return rq_list_pop(data->cached_rq);
+	return rq_list_pop(data->cached_rqs);
 }
 
 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
@@ -584,7 +584,7 @@ static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
 		.flags		= flags,
 		.cmd_flags	= opf,
 		.nr_tags	= plug->nr_ios,
-		.cached_rq	= &plug->cached_rq,
+		.cached_rqs	= &plug->cached_rqs,
 	};
 	struct request *rq;
 
@@ -609,14 +609,14 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
 	if (!plug)
 		return NULL;
 
-	if (rq_list_empty(plug->cached_rq)) {
+	if (rq_list_empty(&plug->cached_rqs)) {
 		if (plug->nr_ios == 1)
 			return NULL;
 		rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
 		if (!rq)
 			return NULL;
 	} else {
-		rq = rq_list_peek(&plug->cached_rq);
+		rq = rq_list_peek(&plug->cached_rqs);
 		if (!rq || rq->q != q)
 			return NULL;
 
@@ -625,7 +625,7 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
 		if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
 			return NULL;
 
-		plug->cached_rq = rq_list_next(rq);
+		rq_list_pop(&plug->cached_rqs);
 		blk_mq_rq_time_init(rq, blk_time_get_ns());
 	}
 
@@ -802,7 +802,7 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug)
 {
 	struct request *rq;
 
-	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
+	while ((rq = rq_list_pop(&plug->cached_rqs)) != NULL)
 		blk_mq_free_request(rq);
 }
 
@@ -1392,8 +1392,7 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 	 */
 	if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
 		plug->has_elevator = true;
-	rq->rq_next = NULL;
-	rq_list_add(&plug->mq_list, rq);
+	rq_list_add_head(&plug->mq_list, rq);
 	plug->rq_count++;
 }
 
@@ -2785,7 +2784,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
 	blk_status_t ret = BLK_STS_OK;
 
 	while ((rq = rq_list_pop(&plug->mq_list))) {
-		bool last = rq_list_empty(plug->mq_list);
+		bool last = rq_list_empty(&plug->mq_list);
 
 		if (hctx != rq->mq_hctx) {
 			if (hctx) {
@@ -2828,8 +2827,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 {
 	struct blk_mq_hw_ctx *this_hctx = NULL;
 	struct blk_mq_ctx *this_ctx = NULL;
-	struct request *requeue_list = NULL;
-	struct request **requeue_lastp = &requeue_list;
+	struct rq_list requeue_list = {};
 	unsigned int depth = 0;
 	bool is_passthrough = false;
 	LIST_HEAD(list);
@@ -2843,12 +2841,12 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 			is_passthrough = blk_rq_is_passthrough(rq);
 		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
 			   is_passthrough != blk_rq_is_passthrough(rq)) {
-			rq_list_add_tail(&requeue_lastp, rq);
+			rq_list_add_tail(&requeue_list, rq);
 			continue;
 		}
 		list_add(&rq->queuelist, &list);
 		depth++;
-	} while (!rq_list_empty(plug->mq_list));
+	} while (!rq_list_empty(&plug->mq_list));
 
 	plug->mq_list = requeue_list;
 	trace_block_unplug(this_hctx->queue, depth, !from_sched);
@@ -2903,19 +2901,19 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		if (q->mq_ops->queue_rqs) {
 			blk_mq_run_dispatch_ops(q,
 				__blk_mq_flush_plug_list(q, plug));
-			if (rq_list_empty(plug->mq_list))
+			if (rq_list_empty(&plug->mq_list))
 				return;
 		}
 
 		blk_mq_run_dispatch_ops(q,
 			blk_mq_plug_issue_direct(plug));
-		if (rq_list_empty(plug->mq_list))
+		if (rq_list_empty(&plug->mq_list))
 			return;
 	}
 
 	do {
 		blk_mq_dispatch_plug_list(plug, from_schedule);
-	} while (!rq_list_empty(plug->mq_list));
+	} while (!rq_list_empty(&plug->mq_list));
 }
 
 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
@@ -2980,7 +2978,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	if (plug) {
 		data.nr_tags = plug->nr_ios;
 		plug->nr_ios = 1;
-		data.cached_rq = &plug->cached_rq;
+		data.cached_rqs = &plug->cached_rqs;
 	}
 
 	rq = __blk_mq_alloc_requests(&data);
@@ -3003,7 +3001,7 @@ static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
 	if (!plug)
 		return NULL;
-	rq = rq_list_peek(&plug->cached_rq);
+	rq = rq_list_peek(&plug->cached_rqs);
 	if (!rq || rq->q != q)
 		return NULL;
 	if (type != rq->mq_hctx->type &&
@@ -3017,14 +3015,14 @@ static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
 static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
 		struct bio *bio)
 {
-	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
+	if (rq_list_pop(&plug->cached_rqs) != rq)
+		WARN_ON_ONCE(1);
 
 	/*
 	 * If any qos ->throttle() end up blocking, we will have flushed the
 	 * plug and hence killed the cached_rq list as well. Pop this entry
 	 * before we throttle.
	 */
-	plug->cached_rq = rq_list_next(rq);
 
 	rq_qos_throttle(rq->q, bio);
 	blk_mq_rq_time_init(rq, blk_time_get_ns());
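
One detail the blk-mq.c hunks lean on: "plug->mq_list = requeue_list;" in blk_mq_dispatch_plug_list() now moves the entire leftover list in a single struct assignment, because an rq_list is just a head and a tail pointer copied by value. A stand-in sketch (types from the example near the top of this page, illustration only):

/* Moving a whole list is a two-pointer struct copy. */
static void splice_all(struct item_list *dst, struct item_list *src)
{
	*dst = *src;		/* copies head and tail at once */
	src->head = NULL;	/* leave the source empty */
	src->tail = NULL;
}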

block/blk-mq.h

@@ -155,7 +155,7 @@ struct blk_mq_alloc_data {
 	/* allocate multiple requests/tags in one go */
 	unsigned int nr_tags;
-	struct request **cached_rq;
+	struct rq_list *cached_rqs;
 
 	/* input & output parameter */
 	struct blk_mq_ctx *ctx;

drivers/block/null_blk/main.c

@@ -1638,10 +1638,9 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
-static void null_queue_rqs(struct request **rqlist)
+static void null_queue_rqs(struct rq_list *rqlist)
 {
-	struct request *requeue_list = NULL;
-	struct request **requeue_lastp = &requeue_list;
+	struct rq_list requeue_list = {};
 	struct blk_mq_queue_data bd = { };
 	blk_status_t ret;
 
@@ -1651,8 +1650,8 @@ static void null_queue_rqs(struct request **rqlist)
 		bd.rq = rq;
 		ret = null_queue_rq(rq->mq_hctx, &bd);
 		if (ret != BLK_STS_OK)
-			rq_list_add_tail(&requeue_lastp, rq);
-	} while (!rq_list_empty(*rqlist));
+			rq_list_add_tail(&requeue_list, rq);
+	} while (!rq_list_empty(rqlist));
 
 	*rqlist = requeue_list;
 }
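
The null_blk conversion shows the shape every ->queue_rqs() implementation takes after this commit: pop each request off the caller's list, and append failures to a local tail-ordered list that is handed back for individual queueing. A hedged sketch of that contract, reusing the stand-in types from the first example; driver_queue_one() is hypothetical:

/* Pretend odd tags fail and must be handed back. */
static int driver_queue_one(struct item *it)
{
	return it->tag & 1;
}

static void driver_queue_all(struct item_list *list)
{
	struct item_list requeue = { };
	struct item *it;

	/* Drain the caller's list; failures keep their relative order. */
	while ((it = list_pop(list)) != NULL) {
		if (driver_queue_one(it) != 0)
			list_add_tail(&requeue, it);
	}

	/* Hand leftovers back; the block layer queues them individually. */
	*list = requeue;
}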

drivers/block/virtio_blk.c

@@ -472,7 +472,7 @@ static bool virtblk_prep_rq_batch(struct request *req)
 }
 
 static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
-		struct request **rqlist)
+		struct rq_list *rqlist)
 {
 	struct request *req;
 	unsigned long flags;
@@ -499,11 +499,10 @@ static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
 		virtqueue_notify(vq->vq);
 }
 
-static void virtio_queue_rqs(struct request **rqlist)
+static void virtio_queue_rqs(struct rq_list *rqlist)
 {
-	struct request *submit_list = NULL;
-	struct request *requeue_list = NULL;
-	struct request **requeue_lastp = &requeue_list;
+	struct rq_list submit_list = { };
+	struct rq_list requeue_list = { };
 	struct virtio_blk_vq *vq = NULL;
 	struct request *req;
 
@@ -515,9 +514,9 @@ static void virtio_queue_rqs(struct request **rqlist)
 			vq = this_vq;
 
 		if (virtblk_prep_rq_batch(req))
-			rq_list_add(&submit_list, req); /* reverse order */
+			rq_list_add_head(&submit_list, req); /* reverse order */
 		else
-			rq_list_add_tail(&requeue_lastp, req);
 	}
 
 	if (vq)

drivers/nvme/host/apple.c

@@ -649,7 +649,7 @@ static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
 	found = apple_nvme_poll_cq(q, &iob);
 
-	if (!rq_list_empty(iob.req_list))
+	if (!rq_list_empty(&iob.req_list))
 		apple_nvme_complete_batch(&iob);
 
 	return found;

drivers/nvme/host/pci.c

@@ -904,7 +904,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
-static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
+static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist)
 {
 	struct request *req;
 
@@ -932,11 +932,10 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
 	return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
 }
 
-static void nvme_queue_rqs(struct request **rqlist)
+static void nvme_queue_rqs(struct rq_list *rqlist)
 {
-	struct request *submit_list = NULL;
-	struct request *requeue_list = NULL;
-	struct request **requeue_lastp = &requeue_list;
+	struct rq_list submit_list = { };
+	struct rq_list requeue_list = { };
 	struct nvme_queue *nvmeq = NULL;
 	struct request *req;
 
@@ -946,9 +945,9 @@ static void nvme_queue_rqs(struct request **rqlist)
 			nvmeq = req->mq_hctx->driver_data;
 
 		if (nvme_prep_rq_batch(nvmeq, req))
-			rq_list_add(&submit_list, req); /* reverse order */
+			rq_list_add_head(&submit_list, req); /* reverse order */
 		else
-			rq_list_add_tail(&requeue_lastp, req);
+			rq_list_add_tail(&requeue_list, req);
 	}
 
 	if (nvmeq)
@@ -1080,7 +1079,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
 	DEFINE_IO_COMP_BATCH(iob);
 
 	if (nvme_poll_cq(nvmeq, &iob)) {
-		if (!rq_list_empty(iob.req_list))
+		if (!rq_list_empty(&iob.req_list))
 			nvme_pci_complete_batch(&iob);
 		return IRQ_HANDLED;
 	}

include/linux/blk-mq.h

@@ -229,44 +229,60 @@ static inline unsigned short req_get_ioprio(struct request *req)
 #define rq_dma_dir(rq) \
 	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
 
-#define rq_list_add(listptr, rq)	do {		\
-	(rq)->rq_next = *(listptr);			\
-	*(listptr) = rq;				\
-} while (0)
-
-#define rq_list_add_tail(lastpptr, rq)	do {		\
-	(rq)->rq_next = NULL;				\
-	**(lastpptr) = rq;				\
-	*(lastpptr) = &rq->rq_next;			\
-} while (0)
-
-#define rq_list_pop(listptr)				\
-({							\
-	struct request *__req = NULL;			\
-	if ((listptr) && *(listptr)) {			\
-		__req = *(listptr);			\
-		*(listptr) = __req->rq_next;		\
-	}						\
-	__req;						\
-})
-
-#define rq_list_peek(listptr)				\
-({							\
-	struct request *__req = NULL;			\
-	if ((listptr) && *(listptr))			\
-		__req = *(listptr);			\
-	__req;						\
-})
-
-#define rq_list_for_each(listptr, pos)			\
-	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))
-
-#define rq_list_for_each_safe(listptr, pos, nxt)			\
-	for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos);	\
-		pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)
-
-#define rq_list_next(rq)	(rq)->rq_next
-#define rq_list_empty(list)	((list) == (struct request *) NULL)
+static inline int rq_list_empty(const struct rq_list *rl)
+{
+	return rl->head == NULL;
+}
+
+static inline void rq_list_init(struct rq_list *rl)
+{
+	rl->head = NULL;
+	rl->tail = NULL;
+}
+
+static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq)
+{
+	rq->rq_next = NULL;
+	if (rl->tail)
+		rl->tail->rq_next = rq;
+	else
+		rl->head = rq;
+	rl->tail = rq;
+}
+
+static inline void rq_list_add_head(struct rq_list *rl, struct request *rq)
+{
+	rq->rq_next = rl->head;
+	rl->head = rq;
+	if (!rl->tail)
+		rl->tail = rq;
+}
+
+static inline struct request *rq_list_pop(struct rq_list *rl)
+{
+	struct request *rq = rl->head;
+
+	if (rq) {
+		rl->head = rl->head->rq_next;
+		if (!rl->head)
+			rl->tail = NULL;
+		rq->rq_next = NULL;
+	}
+
+	return rq;
+}
+
+static inline struct request *rq_list_peek(struct rq_list *rl)
+{
+	return rl->head;
+}
+
+#define rq_list_for_each(rl, pos)					\
+	for (pos = rq_list_peek((rl)); (pos); pos = pos->rq_next)
+
+#define rq_list_for_each_safe(rl, pos, nxt)				\
+	for (pos = rq_list_peek((rl)), nxt = pos->rq_next;		\
+		pos; pos = nxt, nxt = pos ? pos->rq_next : NULL)
 
 /**
  * enum blk_eh_timer_return - How the timeout handler should proceed
@@ -559,7 +575,7 @@ struct blk_mq_ops {
 	 * empty the @rqlist completely, then the rest will be queued
 	 * individually by the block layer upon return.
	 */
-	void (*queue_rqs)(struct request **rqlist);
+	void (*queue_rqs)(struct rq_list *rqlist);
 
 	/**
	 * @get_budget: Reserve budget before queue request, once .queue_rq is
@@ -868,7 +884,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
 	else if (iob->complete != complete)
 		return false;
 	iob->need_ts |= blk_mq_need_time_stamp(req);
-	rq_list_add(&iob->req_list, req);
+	rq_list_add_head(&iob->req_list, req);
 	return true;
 }
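
Two call-site consequences of the helpers above are visible throughout this commit: rq_list_empty() now takes a struct rq_list * instead of comparing a bare request pointer against NULL, which is why every caller gains an &; and rq_list_next() is gone, with the iteration macros chasing rq_next directly. A stand-in version of the safe iterator, for illustration only (note the kernel macro above reads pos->rq_next in its initializer and so assumes a non-empty list, whereas this sketch guards it):

/* Stand-in for rq_list_for_each_safe(): nxt is cached up front so the
 * loop body may unlink or free pos without breaking the walk. */
#define item_list_for_each_safe(l, pos, nxt)			\
	for (pos = (l)->head, nxt = pos ? pos->next : NULL;	\
	     pos; pos = nxt, nxt = pos ? pos->next : NULL)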

include/linux/blkdev.h

@@ -1007,6 +1007,11 @@ extern void blk_put_queue(struct request_queue *);
 void blk_mark_disk_dead(struct gendisk *disk);
 
 #ifdef CONFIG_BLOCK
+struct rq_list {
+	struct request *head;
+	struct request *tail;
+};
+
 /*
  * blk_plug permits building a queue of related requests by holding the I/O
  * fragments for a short period.  This allows merging of sequential requests
@@ -1019,10 +1024,10 @@ void blk_mark_disk_dead(struct gendisk *disk);
  * blk_flush_plug() is called.
  */
 struct blk_plug {
-	struct request *mq_list; /* blk-mq requests */
+	struct rq_list mq_list; /* blk-mq requests */
 
 	/* if ios_left is > 1, we can batch tag/rq allocations */
-	struct request *cached_rq;
+	struct rq_list cached_rqs;
 	u64 cur_ktime;
 	unsigned short nr_ios;
@@ -1684,7 +1689,7 @@ int bdev_thaw(struct block_device *bdev);
 void bdev_fput(struct file *bdev_file);
 
 struct io_comp_batch {
-	struct request *req_list;
+	struct rq_list req_list;
 	bool need_ts;
 	void (*complete)(struct io_comp_batch *);
 };
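
Since struct rq_list is embedded by value in struct blk_plug and struct io_comp_batch, an empty list is simply two NULL pointers. That is why the drivers in this commit can write "struct rq_list requeue_list = {};" where rq_list_init() would do the same thing, and why DEFINE_IO_COMP_BATCH() needs no change here (assuming, as its users suggest, that it zero-initializes the batch). Stand-in illustration, reusing the types and headers from the first sketch:

/* An empty-initialized list and an explicitly initialized one are identical. */
static void init_two_ways(void)
{
	struct item_list a = { };	/* zero-init: head and tail are NULL */
	struct item_list b;

	b.head = NULL;			/* what rq_list_init() does */
	b.tail = NULL;
	assert(a.head == b.head && a.tail == b.tail);
}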

io_uring/rw.c

@@ -1160,12 +1160,12 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 			poll_flags |= BLK_POLL_ONESHOT;
 
 		/* iopoll may have completed current req */
-		if (!rq_list_empty(iob.req_list) ||
+		if (!rq_list_empty(&iob.req_list) ||
 		    READ_ONCE(req->iopoll_completed))
 			break;
 	}
 
-	if (!rq_list_empty(iob.req_list))
+	if (!rq_list_empty(&iob.req_list))
 		iob.complete(&iob);
 	else if (!pos)
 		return 0;