iommu/arm-smmu-v3: Reorganize struct arm_smmu_strtab_cfg

The members here are used for both the linear and the 2-level case, with
the meaning of each item slightly different between the two cases.

Split it into a clean union where each case has its own struct with its
own logical names and correct types.
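
For reference, the resulting layout (taken from the arm-smmu-v3.h hunk
below; the comments are annotations added here, not part of the patch):

  struct arm_smmu_strtab_cfg {
  	union {
  		struct {
  			struct arm_smmu_ste *table;	/* flat array of STEs */
  			dma_addr_t ste_dma;
  			unsigned int num_ents;
  		} linear;
  		struct {
  			struct arm_smmu_strtab_l1 *l1tab;	/* DMA-visible L1 table */
  			struct arm_smmu_strtab_l2 **l2ptrs;	/* CPU pointers to L2 tables */
  			dma_addr_t l1_dma;
  			unsigned int num_l1_ents;
  		} l2;
  	};
  	u64 strtab_base;
  	u32 strtab_base_cfg;
  };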

Adjust all the users to detect linear/2lvl and use the right sub-structure
and types consistently.
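
For example, the STE lookup now dispatches on ARM_SMMU_FEAT_2_LVL_STRTAB
and indexes the matching union member (condensed from the
arm_smmu_get_step_for_sid() hunk below; braces trimmed):

  if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
  	/* Two-level walk: L1 index picks the L2 table, L2 index picks the STE */
  	return &cfg->l2.l2ptrs[arm_smmu_strtab_l1_idx(sid)]
  			->stes[arm_smmu_strtab_l2_idx(sid)];
  /* Simple linear lookup */
  return &cfg->linear.table[sid];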

Remove STRTAB_STE_DWORDS by changing the last places to use
sizeof(struct arm_smmu_ste).
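
For instance, the linear table size computation in
arm_smmu_init_strtab_linear() (see the diff below) becomes:

  size = (1 << smmu->sid_bits) * sizeof(struct arm_smmu_ste);

instead of multiplying the number of SIDs by STRTAB_STE_DWORDS << 3.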

Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/3-v4-6416877274e1+1af-smmuv3_tidy_jgg@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
Jason Gunthorpe, 2024-09-06 12:47:50 -03:00; committed by Will Deacon
commit 85196f5474 (parent abb4f9d323)
2 changed files with 50 additions and 50 deletions

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

@@ -1712,25 +1712,24 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
 {
 	dma_addr_t l2ptr_dma;
 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
-	struct arm_smmu_strtab_l1_desc *desc;
-	__le64 *dst;
+	struct arm_smmu_strtab_l2 **l2table;
 
-	desc = &cfg->l1_desc[arm_smmu_strtab_l1_idx(sid)];
-	if (desc->l2ptr)
+	l2table = &cfg->l2.l2ptrs[arm_smmu_strtab_l1_idx(sid)];
+	if (*l2table)
 		return 0;
 
-	desc->l2ptr = dmam_alloc_coherent(smmu->dev, sizeof(*desc->l2ptr),
-					  &l2ptr_dma, GFP_KERNEL);
-	if (!desc->l2ptr) {
+	*l2table = dmam_alloc_coherent(smmu->dev, sizeof(**l2table),
+				       &l2ptr_dma, GFP_KERNEL);
+	if (!*l2table) {
 		dev_err(smmu->dev,
 			"failed to allocate l2 stream table for SID %u\n",
 			sid);
 		return -ENOMEM;
 	}
 
-	arm_smmu_init_initial_stes(desc->l2ptr->stes, STRTAB_NUM_L2_STES);
-	dst = &cfg->strtab[arm_smmu_strtab_l1_idx(sid)];
-	arm_smmu_write_strtab_l1_desc((struct arm_smmu_strtab_l1 *)dst,
+	arm_smmu_init_initial_stes((*l2table)->stes,
+				   ARRAY_SIZE((*l2table)->stes));
+	arm_smmu_write_strtab_l1_desc(&cfg->l2.l1tab[arm_smmu_strtab_l1_idx(sid)],
 				      l2ptr_dma);
 	return 0;
 }
@@ -2487,12 +2486,11 @@ arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
 
 	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
 		/* Two-level walk */
-		return &cfg->l1_desc[arm_smmu_strtab_l1_idx(sid)]
-				.l2ptr->stes[arm_smmu_strtab_l2_idx(sid)];
+		return &cfg->l2.l2ptrs[arm_smmu_strtab_l1_idx(sid)]
+				->stes[arm_smmu_strtab_l2_idx(sid)];
 	} else {
 		/* Simple linear lookup */
-		return (struct arm_smmu_ste *)&cfg
-			       ->strtab[sid * STRTAB_STE_DWORDS];
+		return &cfg->linear.table[sid];
 	}
 }
 
@@ -3193,8 +3191,8 @@ struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
 static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
 {
 	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
-		return arm_smmu_strtab_l1_idx(sid) < smmu->strtab_cfg.num_l1_ents;
-	return sid < smmu->strtab_cfg.num_l1_ents;
+		return arm_smmu_strtab_l1_idx(sid) < smmu->strtab_cfg.l2.num_l1_ents;
+	return sid < smmu->strtab_cfg.linear.num_ents;
 }
 
 static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid)
@@ -3629,7 +3627,6 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
 
 static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 {
-	void *strtab;
 	u64 reg;
 	u32 l1size;
 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
@@ -3637,34 +3634,33 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 		arm_smmu_strtab_l1_idx((1 << smmu->sid_bits) - 1);
 
 	/* Calculate the L1 size, capped to the SIDSIZE. */
-	cfg->num_l1_ents = min(last_sid_idx + 1, STRTAB_MAX_L1_ENTRIES);
-	if (cfg->num_l1_ents <= last_sid_idx)
+	cfg->l2.num_l1_ents = min(last_sid_idx + 1, STRTAB_MAX_L1_ENTRIES);
+	if (cfg->l2.num_l1_ents <= last_sid_idx)
 		dev_warn(smmu->dev,
 			 "2-level strtab only covers %u/%u bits of SID\n",
-			 ilog2(cfg->num_l1_ents * STRTAB_NUM_L2_STES),
+			 ilog2(cfg->l2.num_l1_ents * STRTAB_NUM_L2_STES),
 			 smmu->sid_bits);
 
-	l1size = cfg->num_l1_ents * sizeof(struct arm_smmu_strtab_l1);
-	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
-				     GFP_KERNEL);
-	if (!strtab) {
+	l1size = cfg->l2.num_l1_ents * sizeof(struct arm_smmu_strtab_l1);
+	cfg->l2.l1tab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->l2.l1_dma,
+					    GFP_KERNEL);
+	if (!cfg->l2.l1tab) {
 		dev_err(smmu->dev,
 			"failed to allocate l1 stream table (%u bytes)\n",
 			l1size);
 		return -ENOMEM;
 	}
-	cfg->strtab = strtab;
 
 	/* Configure strtab_base_cfg for 2 levels */
 	reg = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_2LVL);
 	reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE,
-			  ilog2(cfg->num_l1_ents) + STRTAB_SPLIT);
+			  ilog2(cfg->l2.num_l1_ents) + STRTAB_SPLIT);
 	reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT);
 	cfg->strtab_base_cfg = reg;
 
-	cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents,
-				    sizeof(*cfg->l1_desc), GFP_KERNEL);
-	if (!cfg->l1_desc)
+	cfg->l2.l2ptrs = devm_kcalloc(smmu->dev, cfg->l2.num_l1_ents,
+				      sizeof(*cfg->l2.l2ptrs), GFP_KERNEL);
+	if (!cfg->l2.l2ptrs)
 		return -ENOMEM;
 
 	return 0;
@@ -3672,29 +3668,28 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 
 static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
 {
-	void *strtab;
 	u64 reg;
 	u32 size;
 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
 
-	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
-	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
-				     GFP_KERNEL);
-	if (!strtab) {
+	size = (1 << smmu->sid_bits) * sizeof(struct arm_smmu_ste);
+	cfg->linear.table = dmam_alloc_coherent(smmu->dev, size,
+						&cfg->linear.ste_dma,
+						GFP_KERNEL);
+	if (!cfg->linear.table) {
 		dev_err(smmu->dev,
 			"failed to allocate linear stream table (%u bytes)\n",
 			size);
 		return -ENOMEM;
 	}
-	cfg->strtab = strtab;
-	cfg->num_l1_ents = 1 << smmu->sid_bits;
+	cfg->linear.num_ents = 1 << smmu->sid_bits;
 
 	/* Configure strtab_base_cfg for a linear table covering all SIDs */
 	reg = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_LINEAR);
 	reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
 	cfg->strtab_base_cfg = reg;
 
-	arm_smmu_init_initial_stes(strtab, cfg->num_l1_ents);
+	arm_smmu_init_initial_stes(cfg->linear.table, cfg->linear.num_ents);
 	return 0;
 }
 
@@ -3703,16 +3698,17 @@ static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
 	u64 reg;
 	int ret;
 
-	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
+	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
 		ret = arm_smmu_init_strtab_2lvl(smmu);
-	else
+		reg = smmu->strtab_cfg.l2.l1_dma & STRTAB_BASE_ADDR_MASK;
+	} else {
 		ret = arm_smmu_init_strtab_linear(smmu);
-
+		reg = smmu->strtab_cfg.linear.ste_dma & STRTAB_BASE_ADDR_MASK;
+	}
 	if (ret)
 		return ret;
 
 	/* Set the strtab base address */
-	reg = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK;
 	reg |= STRTAB_BASE_RA;
 	smmu->strtab_cfg.strtab_base = reg;
 

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h

@@ -613,10 +613,6 @@ struct arm_smmu_priq {
 };
 
 /* High-level stream table and context descriptor structures */
-struct arm_smmu_strtab_l1_desc {
-	struct arm_smmu_strtab_l2 *l2ptr;
-};
-
 struct arm_smmu_ctx_desc {
 	u16 asid;
 };
@@ -649,11 +645,19 @@ struct arm_smmu_s2_cfg {
 };
 
 struct arm_smmu_strtab_cfg {
-	__le64 *strtab;
-	dma_addr_t strtab_dma;
-	struct arm_smmu_strtab_l1_desc *l1_desc;
-	unsigned int num_l1_ents;
-
+	union {
+		struct {
+			struct arm_smmu_ste *table;
+			dma_addr_t ste_dma;
+			unsigned int num_ents;
+		} linear;
+		struct {
+			struct arm_smmu_strtab_l1 *l1tab;
+			struct arm_smmu_strtab_l2 **l2ptrs;
+			dma_addr_t l1_dma;
+			unsigned int num_l1_ents;
+		} l2;
+	};
 	u64 strtab_base;
 	u32 strtab_base_cfg;
 };