mirror of https://github.com/torvalds/linux.git (synced 2024-11-21 19:46:16 +00:00)
dma-mapping: fix vmap and mmap of noncontiguous allocations
Commit b5c58b2fdc ("dma-mapping: direct calls for dma-iommu") switched to using
direct calls to dma-iommu, but missed that the dma_vmap_noncontiguous,
dma_vunmap_noncontiguous and dma_mmap_noncontiguous behavior is keyed off the
presence of the alloc_noncontiguous method. Fix this by removing the now unused
alloc_noncontiguous and free_noncontiguous methods and moving the vmapping and
mmapping of the noncontiguous allocations into the iommu code, as it is the
only provider of actually noncontiguous allocations.

Fixes: b5c58b2fdc ("dma-mapping: direct calls for dma-iommu")
Reported-by: Xi Ruoyao <xry111@xry111.site>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Leon Romanovsky <leon@kernel.org>
Tested-by: Xi Ruoyao <xry111@xry111.site>
This commit is contained in:
parent 88264981f2
commit bb0e391975
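For context, the sketch below is not part of this commit (the example_alloc_and_clear() helper is hypothetical); it only shows the driver-side call pattern the fix restores: dma_alloc_noncontiguous() hands back an sg_table, dma_vmap_noncontiguous() maps it into the kernel, and dma_mmap_noncontiguous() would expose it to userspace. With dma-iommu in use these helpers now dispatch to the iommu_dma_* implementations added below; otherwise they fall back to the single-page sg_table path.

#include <linux/dma-mapping.h>
#include <linux/string.h>

/*
 * Hypothetical driver-side sketch, not from this commit: allocate a
 * noncontiguous DMA buffer, map it into the kernel, clear it, and unmap it.
 */
static struct sg_table *example_alloc_and_clear(struct device *dev, size_t size)
{
        struct sg_table *sgt;
        void *vaddr;

        sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
                                      GFP_KERNEL, 0);
        if (!sgt)
                return NULL;

        /* Kernel mapping: vmap()s the stashed page array on the IOMMU path. */
        vaddr = dma_vmap_noncontiguous(dev, size, sgt);
        if (!vaddr) {
                dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
                return NULL;
        }

        memset(vaddr, 0, size);         /* CPU access through the vmap */
        dma_vunmap_noncontiguous(dev, vaddr);

        /* A driver's mmap handler would expose the same buffer to userspace:
         *      err = dma_mmap_noncontiguous(dev, vma, size, sgt);
         */
        return sgt;
}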
drivers/iommu/dma-iommu.c
@@ -1038,6 +1038,21 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	return NULL;
 }
 
+/*
+ * This is the actual return value from the iommu_dma_alloc_noncontiguous.
+ *
+ * The users of the DMA API should only care about the sg_table, but to make
+ * the DMA-API internal vmaping and freeing easier we stash away the page
+ * array as well (except for the fallback case). This can go away any time,
+ * e.g. when a vmap-variant that takes a scatterlist comes along.
+ */
+struct dma_sgt_handle {
+	struct sg_table sgt;
+	struct page **pages;
+};
+#define sgt_handle(sgt) \
+	container_of((sgt), struct dma_sgt_handle, sgt)
+
 struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
 {
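sgt_handle(), moved here from dma-map-ops.h, recovers the enclosing dma_sgt_handle (and its stashed page array) from the sg_table pointer that callers hold, using container_of(). A minimal userspace sketch of the same pattern, with hypothetical fake_* names and a simplified container_of:

#include <stddef.h>
#include <stdio.h>

/* Simplified userspace version of the kernel's container_of(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_sg_table { int nents; };

struct fake_sgt_handle {
        struct fake_sg_table sgt;       /* what the API hands back to callers */
        void **pages;                   /* private page array stashed alongside */
};

#define fake_sgt_handle(sgt) \
        container_of((sgt), struct fake_sgt_handle, sgt)

int main(void)
{
        struct fake_sgt_handle h = { .sgt = { .nents = 4 }, .pages = NULL };
        struct fake_sg_table *sgt = &h.sgt;     /* caller-visible pointer */
        struct fake_sgt_handle *back = fake_sgt_handle(sgt);

        /* Prints the same address twice: the handle is recovered intact. */
        printf("recovered %p, original %p\n", (void *)back, (void *)&h);
        return 0;
}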
@@ -1066,6 +1081,24 @@ void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 	kfree(sh);
 }
 
+void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
+		struct sg_table *sgt)
+{
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
+}
+
+int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+		size_t size, struct sg_table *sgt)
+{
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
+		return -ENXIO;
+	return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
+}
+
 void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir)
 {
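The bounds check in iommu_dma_mmap_noncontiguous() accepts a mapping only if it lies entirely within the count-page buffer, with the length test written so it cannot overflow. A small userspace illustration of the same check (the mmap_range_ok() helper is hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Same test as the kernel code: reject offsets past the end and lengths
 * that would run off the end of a count-page buffer. */
static bool mmap_range_ok(unsigned long count, unsigned long pgoff,
                          unsigned long nr_pages)
{
        return pgoff < count && nr_pages <= count - pgoff;
}

int main(void)
{
        printf("%d\n", mmap_range_ok(16, 0, 16));  /* 1: whole buffer     */
        printf("%d\n", mmap_range_ok(16, 8, 8));   /* 1: tail half        */
        printf("%d\n", mmap_range_ok(16, 8, 9));   /* 0: runs off the end */
        printf("%d\n", mmap_range_ok(16, 16, 1));  /* 0: offset past end  */
        return 0;
}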
include/linux/dma-map-ops.h
@@ -24,11 +24,6 @@ struct dma_map_ops {
 			gfp_t gfp);
 	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
 			dma_addr_t dma_handle, enum dma_data_direction dir);
-	struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
-			enum dma_data_direction dir, gfp_t gfp,
-			unsigned long attrs);
-	void (*free_noncontiguous)(struct device *dev, size_t size,
-			struct sg_table *sgt, enum dma_data_direction dir);
 	int (*mmap)(struct device *, struct vm_area_struct *,
 			void *, dma_addr_t, size_t, unsigned long attrs);
 
@@ -206,20 +201,6 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
 }
 #endif /* CONFIG_DMA_GLOBAL_POOL */
 
-/*
- * This is the actual return value from the ->alloc_noncontiguous method.
- * The users of the DMA API should only care about the sg_table, but to make
- * the DMA-API internal vmaping and freeing easier we stash away the page
- * array as well (except for the fallback case). This can go away any time,
- * e.g. when a vmap-variant that takes a scatterlist comes along.
- */
-struct dma_sgt_handle {
-	struct sg_table sgt;
-	struct page **pages;
-};
-#define sgt_handle(sgt) \
-	container_of((sgt), struct dma_sgt_handle, sgt)
-
 int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
include/linux/iommu-dma.h
@@ -44,6 +44,12 @@ struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
 void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 		struct sg_table *sgt, enum dma_data_direction dir);
+void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
+		struct sg_table *sgt);
+#define iommu_dma_vunmap_noncontiguous(dev, vaddr) \
+	vunmap(vaddr);
+int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+		size_t size, struct sg_table *sgt);
 void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir);
 void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
kernel/dma/mapping.c
@@ -750,7 +750,6 @@ static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
 struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct sg_table *sgt;
 
 	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
@@ -758,9 +757,7 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 	if (WARN_ON_ONCE(gfp & __GFP_COMP))
 		return NULL;
 
-	if (ops && ops->alloc_noncontiguous)
-		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
-	else if (use_dma_iommu(dev))
+	if (use_dma_iommu(dev))
 		sgt = iommu_dma_alloc_noncontiguous(dev, size, dir, gfp, attrs);
 	else
 		sgt = alloc_single_sgt(dev, size, dir, gfp);
@@ -786,13 +783,10 @@ static void free_single_sgt(struct device *dev, size_t size,
 void dma_free_noncontiguous(struct device *dev, size_t size,
 		struct sg_table *sgt, enum dma_data_direction dir)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	trace_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir, 0);
 	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
-	if (ops && ops->free_noncontiguous)
-		ops->free_noncontiguous(dev, size, sgt, dir);
-	else if (use_dma_iommu(dev))
+	if (use_dma_iommu(dev))
 		iommu_dma_free_noncontiguous(dev, size, sgt, dir);
 	else
 		free_single_sgt(dev, size, sgt, dir);
@@ -802,37 +796,26 @@ EXPORT_SYMBOL_GPL(dma_free_noncontiguous);
 void *dma_vmap_noncontiguous(struct device *dev, size_t size,
 		struct sg_table *sgt)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	if (ops && ops->alloc_noncontiguous)
-		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
+	if (use_dma_iommu(dev))
+		return iommu_dma_vmap_noncontiguous(dev, size, sgt);
+
 	return page_address(sg_page(sgt->sgl));
 }
 EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);
 
 void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (ops && ops->alloc_noncontiguous)
-		vunmap(vaddr);
+	if (use_dma_iommu(dev))
+		iommu_dma_vunmap_noncontiguous(dev, vaddr);
 }
 EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);
 
 int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
 		size_t size, struct sg_table *sgt)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (ops && ops->alloc_noncontiguous) {
-		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-		if (vma->vm_pgoff >= count ||
-		    vma_pages(vma) > count - vma->vm_pgoff)
-			return -ENXIO;
-		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
-	}
+	if (use_dma_iommu(dev))
+		return iommu_dma_mmap_noncontiguous(dev, vma, size, sgt);
+
 	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
 }
 EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);