mirror of
https://github.com/torvalds/linux.git
synced 2024-11-21 19:46:16 +00:00
mm: clean up unmap_region() argument list
With the only caller to unmap_region() being the error path of mmap_region(), the argument list can be significantly reduced.

Link: https://lkml.kernel.org/r/20240830040101.822209-14-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Bert Karwatzki <spasswolf@web.de>
Cc: Jeff Xu <jeffxu@chromium.org>
Cc: Jiri Olsa <olsajiri@gmail.com>
Cc: Kees Cook <kees@kernel.org>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Paul Moore <paul@paul-moore.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
9c3ebeda8f
commit
94f59ea591
@@ -1615,8 +1615,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
|
||||
|
||||
vma_iter_set(&vmi, vma->vm_end);
|
||||
/* Undo any partial mapping done by a device driver. */
|
||||
unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start,
|
||||
vma->vm_end, vma->vm_end, true);
|
||||
unmap_region(&vmi.mas, vma, prev, next);
|
||||
}
|
||||
if (writable_file_mapping)
|
||||
mapping_unmap_writable(file->f_mapping);
|
||||
|
17
mm/vma.c
17
mm/vma.c
@@ -155,22 +155,21 @@ void remove_vma(struct vm_area_struct *vma, bool unreachable)
|
||||
*
|
||||
* Called with the mm semaphore held.
|
||||
*/
|
||||
void unmap_region(struct mm_struct *mm, struct ma_state *mas,
|
||||
struct vm_area_struct *vma, struct vm_area_struct *prev,
|
||||
struct vm_area_struct *next, unsigned long start,
|
||||
unsigned long end, unsigned long tree_end, bool mm_wr_locked)
|
||||
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
|
||||
struct vm_area_struct *prev, struct vm_area_struct *next)
|
||||
{
|
||||
struct mm_struct *mm = vma->vm_mm;
|
||||
struct mmu_gather tlb;
|
||||
unsigned long mt_start = mas->index;
|
||||
|
||||
lru_add_drain();
|
||||
tlb_gather_mmu(&tlb, mm);
|
||||
update_hiwater_rss(mm);
|
||||
unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
|
||||
mas_set(mas, mt_start);
|
||||
unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
|
||||
/* mm_wr_locked = */ true);
|
||||
mas_set(mas, vma->vm_end);
|
||||
free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
|
||||
next ? next->vm_start : USER_PGTABLES_CEILING,
|
||||
mm_wr_locked);
|
||||
next ? next->vm_start : USER_PGTABLES_CEILING,
|
||||
/* mm_wr_locked = */ true);
|
||||
tlb_finish_mmu(&tlb);
|
||||
}
|
||||
|
||||
|
6
mm/vma.h
6
mm/vma.h
@@ -149,10 +149,8 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
|
||||
|
||||
void remove_vma(struct vm_area_struct *vma, bool unreachable);
|
||||
|
||||
void unmap_region(struct mm_struct *mm, struct ma_state *mas,
|
||||
struct vm_area_struct *vma, struct vm_area_struct *prev,
|
||||
struct vm_area_struct *next, unsigned long start,
|
||||
unsigned long end, unsigned long tree_end, bool mm_wr_locked);
|
||||
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
|
||||
struct vm_area_struct *prev, struct vm_area_struct *next);
|
||||
|
||||
/* Required by mmap_region(). */
|
||||
bool
|
||||
|
Loading…
Reference in New Issue
Block a user