blk-integrity: improved sg segment mapping

Make the integrity mapping more like data mapping, blk_rq_map_sg. Use
the request to validate the segment count, and update the callers so
they don't have to.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Link: https://lore.kernel.org/r/20240913191746.2628196-1-kbusch@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Keith Busch <kbusch@kernel.org> 2024-09-13 12:17:46 -07:00
Committer: Jens Axboe
commit 76c313f658
parent db5197b554
4 changed files with 18 additions and 18 deletions
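
In short, the signature changes from blk_rq_map_integrity_sg(q, bio, sgl) to blk_rq_map_integrity_sg(rq, sgl), mirroring blk_rq_map_sg(). A minimal caller-side sketch, assuming a hypothetical driver helper my_map_metadata() that is not part of the patch:

#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical driver helper showing the new calling convention.
 * The scatterlist must have room for rq->nr_integrity_segments
 * entries; the core now validates the mapped count internally,
 * so the caller no longer repeats the range checks itself.
 */
static int my_map_metadata(struct request *rq, struct scatterlist *sgl)
{
        return blk_rq_map_integrity_sg(rq, sgl);
}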

block/blk-integrity.c

@@ -62,19 +62,20 @@ int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
  *
  * Description: Map the integrity vectors in request into a
  * scatterlist. The scatterlist must be big enough to hold all
- * elements. I.e. sized using blk_rq_count_integrity_sg().
+ * elements. I.e. sized using blk_rq_count_integrity_sg() or
+ * rq->nr_integrity_segments.
  */
-int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
-                            struct scatterlist *sglist)
+int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
 {
         struct bio_vec iv, ivprv = { NULL };
+        struct request_queue *q = rq->q;
         struct scatterlist *sg = NULL;
+        struct bio *bio = rq->bio;
         unsigned int segments = 0;
         struct bvec_iter iter;
         int prev = 0;
 
         bio_for_each_integrity_vec(iv, bio, iter) {
-
                 if (prev) {
                         if (!biovec_phys_mergeable(q, &ivprv, &iv))
                                 goto new_segment;
@@ -102,6 +103,12 @@ int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
         if (sg)
                 sg_mark_end(sg);
 
+        /*
+         * Something must have been wrong if the figured number of segment
+         * is bigger than number of req's physical integrity segments
+         */
+        BUG_ON(segments > rq->nr_integrity_segments);
+        BUG_ON(segments > queue_max_integrity_segments(q));
         return segments;
 }
 EXPORT_SYMBOL(blk_rq_map_integrity_sg);
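
The updated kerneldoc allows sizing the scatterlist either from blk_rq_count_integrity_sg() or from rq->nr_integrity_segments. A minimal sketch of the latter option, assuming a plain non-chained table and process context; example_map_pi() is illustrative only, not code from the patch:

#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>

/*
 * Illustrative only: size the table from the request's precomputed
 * integrity segment count, then let the core map and sanity-check it.
 */
static int example_map_pi(struct request *rq, struct sg_table *table)
{
        int nents;

        if (sg_alloc_table(table, rq->nr_integrity_segments, GFP_KERNEL))
                return -ENOMEM;

        /*
         * The mapped count cannot exceed rq->nr_integrity_segments;
         * the BUG_ONs above now enforce that in the core instead of
         * in every caller.
         */
        nents = blk_rq_map_integrity_sg(rq, table->sgl);
        table->nents = nents;
        return nents;
}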

drivers/nvme/host/rdma.c

@@ -1504,8 +1504,8 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
                         goto out_unmap_sg;
                 }
 
-                req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
-                                rq->bio, req->metadata_sgl->sg_table.sgl);
+                req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq,
+                                req->metadata_sgl->sg_table.sgl);
                 *pi_count = ib_dma_map_sg(ibdev,
                                           req->metadata_sgl->sg_table.sgl,
                                           req->metadata_sgl->nents,
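
The NVMe/RDMA caller simply drops the rq->q and rq->bio arguments; the underlying map-then-DMA-map pattern it follows looks roughly like the sketch below, where example_dma_map_pi() is a simplified illustration with error handling omitted:

#include <rdma/ib_verbs.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>

/*
 * Illustrative sketch: map the request's integrity vectors into a
 * pre-allocated sg table, then DMA-map the entries for the HCA.
 */
static int example_dma_map_pi(struct ib_device *ibdev, struct request *rq,
                              struct sg_table *meta)
{
        int nents = blk_rq_map_integrity_sg(rq, meta->sgl);

        return ib_dma_map_sg(ibdev, meta->sgl, nents, rq_dma_dir(rq));
}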

drivers/scsi/scsi_lib.c

@@ -1163,7 +1163,6 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
 
         if (blk_integrity_rq(rq)) {
                 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
-                int ivecs;
 
                 if (WARN_ON_ONCE(!prot_sdb)) {
                         /*
@@ -1175,19 +1174,15 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
                         goto out_free_sgtables;
                 }
 
-                ivecs = rq->nr_integrity_segments;
-                if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
+                if (sg_alloc_table_chained(&prot_sdb->table,
+                                           rq->nr_integrity_segments,
                                            prot_sdb->table.sgl,
                                            SCSI_INLINE_PROT_SG_CNT)) {
                         ret = BLK_STS_RESOURCE;
                         goto out_free_sgtables;
                 }
 
-                count = blk_rq_map_integrity_sg(rq->q, rq->bio,
-                                                prot_sdb->table.sgl);
-
-                BUG_ON(count > ivecs);
-                BUG_ON(count > queue_max_integrity_segments(rq->q));
+                count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl);
                 cmd->prot_sdb = prot_sdb;
                 cmd->prot_sdb->table.nents = count;
         }
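
With the segment count carried by the request and the sanity checks moved into the core, the SCSI path reduces to allocate-then-map. A rough sketch, with example_alloc_prot() and EXAMPLE_INLINE_SG_CNT as illustrative stand-ins for the real scsi_lib.c code:

#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>

#define EXAMPLE_INLINE_SG_CNT 1 /* stand-in for SCSI_INLINE_PROT_SG_CNT */

/* Illustrative sketch of the simplified scsi_alloc_sgtables() flow. */
static blk_status_t example_alloc_prot(struct request *rq,
                                       struct sg_table *prot)
{
        /* Size the chained table straight from the request. */
        if (sg_alloc_table_chained(prot, rq->nr_integrity_segments,
                                   prot->sgl, EXAMPLE_INLINE_SG_CNT))
                return BLK_STS_RESOURCE;

        /* No local BUG_ON re-validation needed anymore. */
        prot->nents = blk_rq_map_integrity_sg(rq, prot->sgl);
        return BLK_STS_OK;
}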

include/linux/blk-integrity.h

@@ -25,8 +25,7 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
-int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
-                            struct scatterlist *);
+int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
 int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
 int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
                               ssize_t bytes, u32 seed);
@@ -98,8 +97,7 @@ static inline int blk_rq_count_integrity_sg(struct request_queue *q,
 {
         return 0;
 }
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
-                                          struct bio *b,
+static inline int blk_rq_map_integrity_sg(struct request *q,
                                           struct scatterlist *s)
 {
         return 0;
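
The !CONFIG_BLK_DEV_INTEGRITY stub mirrors the new two-argument signature and returns 0, so callers can stay unconditional. A sketch, with example_setup() as a hypothetical caller:

#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>

/*
 * Illustrative: with CONFIG_BLK_DEV_INTEGRITY=n the static inline stub
 * returns 0, which reads naturally as "no integrity segments mapped".
 */
static void example_setup(struct request *rq, struct scatterlist *sgl)
{
        int nents = blk_rq_map_integrity_sg(rq, sgl);

        if (!nents)
                return; /* no metadata for this request */
        /* ... program protection information using nents entries ... */
}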