Mirror of https://github.com/torvalds/linux.git
scsi: core: Add a dma_alignment field to the host and host template
Get drivers out of the business of having to call the block layer DMA
alignment limits helpers themselves.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20240409143748.980206-8-hch@lst.de
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 6248d7f771
commit 5b7dfbeff9
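For illustration only (not part of this commit): a minimal sketch of how a
driver can now declare its buffer alignment once in its scsi_host_template
instead of calling blk_queue_dma_alignment() from ->slave_configure(). The
"example_hba" driver and its template are hypothetical; only the
.dma_alignment field is the mechanism this commit adds.

    #include <scsi/scsi_host.h>

    /* Hypothetical driver template; .dma_alignment is the new field. */
    static const struct scsi_host_template example_hba_template = {
            .name           = "example_hba",
            .this_id        = -1,
            .sg_tablesize   = SG_ALL,
            /* buffers must be 512-byte aligned, i.e. mask = 512 - 1 */
            .dma_alignment  = 511,
    };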
@@ -1500,12 +1500,6 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
 
 	sdev->allow_restart = 1;
 
-	/*
-	 * SBP-2 does not require any alignment, but we set it anyway
-	 * for compatibility with earlier versions of this driver.
-	 */
-	blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
-
 	if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
 		sdev->inquiry_len = 36;
 
@@ -129,6 +129,7 @@ static const struct scsi_host_template mptfc_driver_template = {
 	.sg_tablesize = MPT_SCSI_SG_DEPTH,
 	.max_sectors = 8192,
 	.cmd_per_lun = 7,
+	.dma_alignment = 511,
 	.shost_groups = mptscsih_host_attr_groups,
 };
 
@@ -2020,6 +2020,7 @@ static const struct scsi_host_template mptsas_driver_template = {
 	.sg_tablesize = MPT_SCSI_SG_DEPTH,
 	.max_sectors = 8192,
 	.cmd_per_lun = 7,
+	.dma_alignment = 511,
 	.shost_groups = mptscsih_host_attr_groups,
 	.no_write_same = 1,
 };
@@ -2438,8 +2438,6 @@ mptscsih_slave_configure(struct scsi_device *sdev)
 		 "tagged %d, simple %d\n",
 		ioc->name,sdev->tagged_supported, sdev->simple_tags));
 
-	blk_queue_dma_alignment (sdev->request_queue, 512 - 1);
-
 	return 0;
 }
 
@@ -843,6 +843,7 @@ static const struct scsi_host_template mptspi_driver_template = {
 	.sg_tablesize = MPT_SCSI_SG_DEPTH,
 	.max_sectors = 8192,
 	.cmd_per_lun = 7,
+	.dma_alignment = 511,
 	.shost_groups = mptscsih_host_attr_groups,
 };
 
@@ -478,6 +478,12 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv
 	else
 		shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 
+	/* 32-byte (dword) is a common minimum for HBAs. */
+	if (sht->dma_alignment)
+		shost->dma_alignment = sht->dma_alignment;
+	else
+		shost->dma_alignment = 3;
+
 	/*
 	 * assume a 4GB boundary, if not set
 	 */
@@ -943,6 +943,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
 	shost->max_id = 0;
 	shost->max_channel = 0;
 	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+	shost->dma_alignment = 0;
 
 	rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
 	if (rc < 0)
@@ -1065,7 +1066,6 @@ static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
 	if (conn->datadgst_en)
 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
 				   sdev->request_queue);
-	blk_queue_dma_alignment(sdev->request_queue, 0);
 	return 0;
 }
 
@@ -1957,9 +1957,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
 	scsi_qla_host_t *vha = shost_priv(sdev->host);
 	struct req_que *req = vha->req;
 
-	if (IS_T10_PI_CAPABLE(vha->hw))
-		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
-
 	scsi_change_queue_depth(sdev, req->max_q_depth);
 	return 0;
 }
@@ -3575,6 +3572,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		    QLA_SG_ALL : 128;
 	}
 
+	if (IS_T10_PI_CAPABLE(base_vha->hw))
+		host->dma_alignment = 0x7;
+
 	ret = scsi_add_host(host, &pdev->dev);
 	if (ret)
 		goto probe_failed;
@@ -1985,15 +1985,8 @@ void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim)
 	lim->seg_boundary_mask = shost->dma_boundary;
 	lim->max_segment_size = shost->max_segment_size;
 	lim->virt_boundary_mask = shost->virt_boundary_mask;
-
-	/*
-	 * Set a reasonable default alignment: The larger of 32-byte (dword),
-	 * which is a common minimum for HBAs, and the minimum DMA alignment,
-	 * which is set by the platform.
-	 *
-	 * Devices that require a bigger alignment can increase it later.
-	 */
-	lim->dma_alignment = max(4, dma_get_cache_alignment()) - 1;
+	lim->dma_alignment = max_t(unsigned int,
+		shost->dma_alignment, dma_get_cache_alignment() - 1);
 
 	if (shost->no_highmem)
 		lim->bounce = BLK_BOUNCE_HIGH;
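For context, the hunk above replaces the fixed default with the stricter of
the host's declared mask and the platform's DMA cache alignment. A
standalone sketch of that mask arithmetic follows; the
effective_dma_alignment() helper and the sample values are made up for
illustration.

    #include <stdio.h>

    /* Pick the stricter (larger) of the two alignment masks, mirroring
     * the max_t() expression in scsi_init_limits() above. */
    static unsigned int effective_dma_alignment(unsigned int host_mask,
                                                unsigned int cache_alignment)
    {
            unsigned int cache_mask = cache_alignment - 1;

            return host_mask > cache_mask ? host_mask : cache_mask;
    }

    int main(void)
    {
            /* USB storage host (mask 511) on a 64-byte cache-line platform */
            printf("%u\n", effective_dma_alignment(511, 64)); /* prints 511 */
            /* default host mask of 3 (dword) on the same platform */
            printf("%u\n", effective_dma_alignment(3, 64));   /* prints 63 */
            return 0;
    }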
@@ -70,18 +70,6 @@ static int slave_alloc(struct scsi_device *sdev)
 
 static int slave_configure(struct scsi_device *sdev)
 {
-	/*
-	 * Scatter-gather buffers (all but the last) must have a length
-	 * divisible by the bulk maxpacket size. Otherwise a data packet
-	 * would end up being short, causing a premature end to the data
-	 * transfer. Since high-speed bulk pipes have a maxpacket size
-	 * of 512, we'll use that as the scsi device queue's DMA alignment
-	 * mask. Guaranteeing proper alignment of the first buffer will
-	 * have the desired effect because, except at the beginning and
-	 * the end, scatter-gather buffers follow page boundaries.
-	 */
-	blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
-
 	/* Set the SCSI level to at least 2. We'll leave it at 3 if that's
 	 * what is originally reported. We need this to avoid confusing
 	 * the SCSI layer with devices that report 0 or 1, but need 10-byte
@@ -219,6 +207,18 @@ static const struct scsi_host_template rtsx_host_template = {
 	/* limit the total size of a transfer to 120 KB */
 	.max_sectors = 240,
 
+	/*
+	 * Scatter-gather buffers (all but the last) must have a length
+	 * divisible by the bulk maxpacket size. Otherwise a data packet
+	 * would end up being short, causing a premature end to the data
+	 * transfer. Since high-speed bulk pipes have a maxpacket size
+	 * of 512, we'll use that as the scsi device queue's DMA alignment
+	 * mask. Guaranteeing proper alignment of the first buffer will
+	 * have the desired effect because, except at the beginning and
+	 * the end, scatter-gather buffers follow page boundaries.
+	 */
+	.dma_alignment = 511,
+
 	/* emulated HBA */
 	.emulated = 1,
 
@@ -328,12 +328,6 @@ static int mts_slave_alloc (struct scsi_device *s)
 	return 0;
 }
 
-static int mts_slave_configure (struct scsi_device *s)
-{
-	blk_queue_dma_alignment(s->request_queue, (512 - 1));
-	return 0;
-}
-
 static int mts_scsi_abort(struct scsi_cmnd *srb)
 {
 	struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
@@ -631,8 +625,8 @@ static const struct scsi_host_template mts_scsi_host_template = {
 	.can_queue = 1,
 	.this_id = -1,
 	.emulated = 1,
+	.dma_alignment = 511,
 	.slave_alloc = mts_slave_alloc,
-	.slave_configure = mts_slave_configure,
 	.max_sectors= 256, /* 128 K */
 };
 
@@ -75,12 +75,6 @@ static int slave_alloc (struct scsi_device *sdev)
 	 */
 	sdev->inquiry_len = 36;
 
-	/*
-	 * Some host controllers may have alignment requirements.
-	 * We'll play it safe by requiring 512-byte alignment always.
-	 */
-	blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
-
 	/* Tell the SCSI layer if we know there is more than one LUN */
 	if (us->protocol == USB_PR_BULK && us->max_lun > 0)
 		sdev->sdev_bflags |= BLIST_FORCELUN;
@@ -638,6 +632,11 @@ static const struct scsi_host_template usb_stor_host_template = {
 	/* lots of sg segments can be handled */
 	.sg_tablesize = SG_MAX_SEGMENTS,
 
+	/*
+	 * Some host controllers may have alignment requirements.
+	 * We'll play it safe by requiring 512-byte alignment always.
+	 */
+	.dma_alignment = 511,
 
 	/*
 	 * Limit the total size of a transfer to 120 KB.
@@ -824,13 +824,6 @@ static int uas_slave_alloc(struct scsi_device *sdev)
 
 	sdev->hostdata = devinfo;
 
-	/*
-	 * The protocol has no requirements on alignment in the strict sense.
-	 * Controllers may or may not have alignment restrictions.
-	 * As this is not exported, we use an extremely conservative guess.
-	 */
-	blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
-
 	if (devinfo->flags & US_FL_MAX_SECTORS_64)
 		blk_queue_max_hw_sectors(sdev->request_queue, 64);
 	else if (devinfo->flags & US_FL_MAX_SECTORS_240)
@@ -912,6 +905,12 @@ static const struct scsi_host_template uas_host_template = {
 	.eh_device_reset_handler = uas_eh_device_reset_handler,
 	.this_id = -1,
 	.skip_settle_delay = 1,
+	/*
+	 * The protocol has no requirements on alignment in the strict sense.
+	 * Controllers may or may not have alignment restrictions.
+	 * As this is not exported, we use an extremely conservative guess.
+	 */
+	.dma_alignment = 511,
 	.dma_boundary = PAGE_SIZE - 1,
 	.cmd_size = sizeof(struct uas_cmd_info),
 };
@@ -405,6 +405,8 @@ struct scsi_host_template {
 	 */
 	unsigned int max_segment_size;
 
+	unsigned int dma_alignment;
+
 	/*
 	 * DMA scatter gather segment boundary limit. A segment crossing this
 	 * boundary will be split in two.
@@ -614,6 +616,7 @@ struct Scsi_Host {
 	unsigned int max_sectors;
 	unsigned int opt_sectors;
 	unsigned int max_segment_size;
+	unsigned int dma_alignment;
 	unsigned long dma_boundary;
 	unsigned long virt_boundary_mask;
 	/*