mm: unconditionally close VMAs on error
Incorrect invocation of VMA callbacks when the VMA is no longer in a
consistent state is bug-prone and risky to perform.

With regard to the important vm_ops->close() callback, we have gone to
great lengths to try to track whether or not we ought to close VMAs.

Rather than doing so and risking making a mistake somewhere, instead
unconditionally close and reset vma->vm_ops to an empty dummy operations
set with a NULL .close operator.

We introduce a new function to do so, vma_close(), and simplify the
existing vma_munmap_struct (vms) logic which tracked whether we needed to
close or not.

This simplifies the logic, avoids incorrect double-calling of the .close()
callback, and allows us to update error paths to simply call vma_close()
unconditionally, making VMA closure idempotent.
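
For illustration, here is a minimal, self-contained userspace sketch of the
"close once, then install dummy ops" pattern this patch adopts. The names
(obj, obj_ops, obj_close, dummy_ops) are hypothetical stand-ins, not kernel
API; only standard C is assumed:

#include <stdio.h>

struct obj;

struct obj_ops {
	void (*close)(struct obj *);
};

/* Analogous to vma_dummy_vm_ops: an ops table with no .close hook. */
static const struct obj_ops dummy_ops = { 0 };

struct obj {
	const struct obj_ops *ops;
};

/* Analogous to vma_close(): close at most once, then neuter the hooks. */
static void obj_close(struct obj *o)
{
	if (o->ops && o->ops->close) {
		o->ops->close(o);
		/* Object state is now suspect; no further hooks may run. */
		o->ops = &dummy_ops;
	}
}

static void real_close(struct obj *o)
{
	(void)o;
	printf("close called\n");
}

static const struct obj_ops real_ops = { .close = real_close };

int main(void)
{
	struct obj o = { .ops = &real_ops };

	obj_close(&o);	/* invokes real_close() once */
	obj_close(&o);	/* idempotent: dummy ops installed, nothing to do */
	return 0;
}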
Link: https://lkml.kernel.org/r/28e89dda96f68c505cb6f8e9fc9b57c3e9f74b42.1730224667.git.lorenzo.stoakes@oracle.com
Fixes: deb0f65628 ("mm/mmap: undo ->mmap() when arch_validate_flags() fails")
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reported-by: Jann Horn <jannh@google.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Jann Horn <jannh@google.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Helge Deller <deller@gmx.de>
Cc: James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Will Deacon <will@kernel.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 4080ef1579 (parent 3dd6ed34ce)
mm/internal.h
@@ -135,6 +135,24 @@ static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
 	return err;
 }
 
+/*
+ * If the VMA has a close hook then close it, and since closing it might leave
+ * it in an inconsistent state which makes the use of any hooks suspect, clear
+ * them down by installing dummy empty hooks.
+ */
+static inline void vma_close(struct vm_area_struct *vma)
+{
+	if (vma->vm_ops && vma->vm_ops->close) {
+		vma->vm_ops->close(vma);
+
+		/*
+		 * The mapping is in an inconsistent state, and no further hooks
+		 * may be invoked upon it.
+		 */
+		vma->vm_ops = &vma_dummy_vm_ops;
+	}
+}
+
 #ifdef CONFIG_MMU
 
 /* Flags for folio_pte_batch(). */
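With the helper in place, callers no longer need to guard on the state of the
ops table, and repeated invocation is safe. A before/after sketch of the
caller-side change, condensed from the hunks below:

	/* Before: each caller re-checked the hooks and risked a double call. */
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	/* After: unconditional; a second call finds vma_dummy_vm_ops installed
	 * by the first and is a no-op, so error paths may call it freely. */
	vma_close(vma);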
mm/mmap.c
@@ -1573,8 +1573,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	return addr;
 
 close_and_free_vma:
-	if (file && !vms.closed_vm_ops && vma->vm_ops && vma->vm_ops->close)
-		vma->vm_ops->close(vma);
+	vma_close(vma);
 
 	if (file || vma->vm_file) {
 unmap_and_free_vma:
@@ -1934,7 +1933,7 @@ void exit_mmap(struct mm_struct *mm)
 	do {
 		if (vma->vm_flags & VM_ACCOUNT)
 			nr_accounted += vma_pages(vma);
-		remove_vma(vma, /* unreachable = */ true, /* closed = */ false);
+		remove_vma(vma, /* unreachable = */ true);
 		count++;
 		cond_resched();
 		vma = vma_next(&vmi);
mm/nommu.c
@@ -589,8 +589,7 @@ static int delete_vma_from_mm(struct vm_area_struct *vma)
  */
 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	if (vma->vm_ops && vma->vm_ops->close)
-		vma->vm_ops->close(vma);
+	vma_close(vma);
 	if (vma->vm_file)
 		fput(vma->vm_file);
 	put_nommu_region(vma->vm_region);
mm/vma.c
@@ -323,11 +323,10 @@ static bool can_vma_merge_right(struct vma_merge_struct *vmg,
 /*
  * Close a vm structure and free it.
  */
-void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed)
+void remove_vma(struct vm_area_struct *vma, bool unreachable)
 {
 	might_sleep();
-	if (!closed && vma->vm_ops && vma->vm_ops->close)
-		vma->vm_ops->close(vma);
+	vma_close(vma);
 	if (vma->vm_file)
 		fput(vma->vm_file);
 	mpol_put(vma_policy(vma));
@@ -1115,9 +1114,7 @@ void vms_clean_up_area(struct vma_munmap_struct *vms,
 	vms_clear_ptes(vms, mas_detach, true);
 	mas_set(mas_detach, 0);
 	mas_for_each(mas_detach, vma, ULONG_MAX)
-		if (vma->vm_ops && vma->vm_ops->close)
-			vma->vm_ops->close(vma);
-	vms->closed_vm_ops = true;
+		vma_close(vma);
 }
 
 /*
@@ -1160,7 +1157,7 @@ void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 	/* Remove and clean up vmas */
 	mas_set(mas_detach, 0);
 	mas_for_each(mas_detach, vma, ULONG_MAX)
-		remove_vma(vma, /* unreachable = */ false, vms->closed_vm_ops);
+		remove_vma(vma, /* unreachable = */ false);
 
 	vm_unacct_memory(vms->nr_accounted);
 	validate_mm(mm);
@@ -1684,8 +1681,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	return new_vma;
 
 out_vma_link:
-	if (new_vma->vm_ops && new_vma->vm_ops->close)
-		new_vma->vm_ops->close(new_vma);
+	vma_close(new_vma);
 
 	if (new_vma->vm_file)
 		fput(new_vma->vm_file);
mm/vma.h
@@ -42,7 +42,6 @@ struct vma_munmap_struct {
 	int vma_count;			/* Number of vmas that will be removed */
 	bool unlock;			/* Unlock after the munmap */
 	bool clear_ptes;		/* If there are outstanding PTE to be cleared */
-	bool closed_vm_ops;		/* call_mmap() was encountered, so vmas may be closed */
 	/* 1 byte hole */
 	unsigned long nr_pages;		/* Number of pages being removed */
 	unsigned long locked_vm;	/* Number of locked pages */
@@ -198,7 +197,6 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
 	vms->unmap_start = FIRST_USER_ADDRESS;
 	vms->unmap_end = USER_PGTABLES_CEILING;
 	vms->clear_ptes = false;
-	vms->closed_vm_ops = false;
 }
 #endif
 
@@ -269,7 +267,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 		unsigned long start, size_t len, struct list_head *uf,
 		bool unlock);
 
-void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed);
+void remove_vma(struct vm_area_struct *vma, bool unreachable);
 
 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct vm_area_struct *next);