Mirror of https://github.com/torvalds/linux.git, synced 2024-11-22 04:38:03 +00:00
Merge tag 'mm-stable-2024-05-22-17-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull more mm updates from Andrew Morton:
 "A series from Dave Chinner which cleans up and fixes the handling of
  nested allocations within stackdepot and page-owner"

* tag 'mm-stable-2024-05-22-17-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/page-owner: use gfp_nested_mask() instead of open coded masking
  stackdepot: use gfp_nested_mask() instead of open coded masking
  mm: lift gfp_kmemleak_mask() to gfp.h

-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCZk6MRwAKCRDdBJ7gKXxA
jnzeAP9WHW425N7pWmE7rK7n8oXZK9f356dKJMtz2A35Bx6XJgEAuK86kDRA4Kv3
kg8mtwzOIQYKZWzn5VlcvBbtlhjKGwM=
=9/Ou
-----END PGP SIGNATURE-----
commit 5c6f4d68e2
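To see concretely what the new helper keeps and strips, here is a worked example of the masking (an illustration added for this write-up, based on the flag definitions in include/linux/gfp_types.h, not part of the commit itself):

	GFP_NOFS   == __GFP_RECLAIM | __GFP_IO
	GFP_KERNEL == __GFP_RECLAIM | __GFP_IO | __GFP_FS

	gfp_nested_mask(GFP_NOFS)
	    == (GFP_NOFS & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP))
	       | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
	    == __GFP_RECLAIM | __GFP_IO	/* __GFP_FS is still absent */
	       | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN

A nested allocation made on behalf of a GFP_NOFS caller therefore still cannot recurse into filesystem reclaim, and additionally fails fast, fails silently, and stays out of the emergency reserves.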
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
@@ -156,6 +156,31 @@ static inline int gfp_zonelist(gfp_t flags)
 	return ZONELIST_FALLBACK;
 }
 
+/*
+ * gfp flag masking for nested internal allocations.
+ *
+ * For code that needs to do allocations inside the public allocation API (e.g.
+ * memory allocation tracking code) the allocations need to obey the caller
+ * allocation context constraints to prevent allocation context mismatches (e.g.
+ * GFP_KERNEL allocations in GFP_NOFS contexts) from potential deadlock
+ * situations.
+ *
+ * It is also assumed that these nested allocations are for internal kernel
+ * object storage purposes only and are not going to be used for DMA, etc. Hence
+ * we strip out all the zone information and leave just the context information
+ * intact.
+ *
+ * Further, internal allocations must fail before the higher level allocation
+ * can fail, so we must make them fail faster and fail silently. We also don't
+ * want them to deplete emergency reserves. Hence nested allocations must be
+ * prepared for these allocations to fail.
+ */
+static inline gfp_t gfp_nested_mask(gfp_t flags)
+{
+	return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) |
+		(__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN));
+}
+
 /*
  * We get the zone list from the current node and the gfp_mask.
  * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
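As a usage illustration (a minimal sketch, not code from this series; the subsystem, struct, and function names are invented), a hypothetical tracking subsystem that allocates its own metadata from inside an allocation path would now write:

	#include <linux/gfp.h>
	#include <linux/slab.h>

	struct track_node {
		unsigned long		ip;
		struct track_node	*next;
	};

	/* Record an allocation site. May be reached from any allocation
	 * context, so only the caller's context bits may be inherited. */
	static struct track_node *track_record(gfp_t caller_flags,
					       unsigned long ip)
	{
		struct track_node *node;

		/* Strip zone bits, keep context bits, fail fast and silently. */
		node = kmalloc(sizeof(*node), gfp_nested_mask(caller_flags));
		if (node)
			node->ip = ip;
		return node;	/* callers must tolerate NULL */
	}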
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
@@ -624,15 +624,8 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 	 * we won't be able to do that under the lock.
 	 */
 	if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
-		/*
-		 * Zero out zone modifiers, as we don't have specific zone
-		 * requirements. Keep the flags related to allocation in atomic
-		 * contexts, I/O, nolockdep.
-		 */
-		alloc_flags &= ~GFP_ZONEMASK;
-		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
-		alloc_flags |= __GFP_NOWARN;
-		page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
+		page = alloc_pages(gfp_nested_mask(alloc_flags),
+				   DEPOT_POOL_ORDER);
 		if (page)
 			prealloc = page_address(page);
 	}
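For reference, a sketch of a caller (invented for illustration, though stack_depot_save_flags(), stack_trace_save(), and STACK_DEPOT_FLAG_CAN_ALLOC are the existing interfaces): after this change the caller can hand its own gfp flags straight through, and stackdepot applies gfp_nested_mask() internally before touching the page allocator:

	#include <linux/kernel.h>
	#include <linux/stackdepot.h>
	#include <linux/stacktrace.h>

	static depot_stack_handle_t save_current_stack(gfp_t gfp)
	{
		unsigned long entries[16];
		unsigned int nr;

		nr = stack_trace_save(entries, ARRAY_SIZE(entries), 2);

		/* Pool refill inside stackdepot now obeys the caller's
		 * context via gfp_nested_mask(gfp). */
		return stack_depot_save_flags(entries, nr, gfp,
					      STACK_DEPOT_FLAG_CAN_ALLOC);
	}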
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
@@ -114,12 +114,6 @@
 
 #define BYTES_PER_POINTER	sizeof(void *)
 
-/* GFP bitmask for kmemleak internal allocations */
-#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
-					   __GFP_NOLOCKDEP)) | \
-				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
-				 __GFP_NOWARN)
-
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
 	struct hlist_node node;
@@ -463,7 +457,8 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
 
 	/* try the slab allocator first */
 	if (object_cache) {
-		object = kmem_cache_alloc_noprof(object_cache, gfp_kmemleak_mask(gfp));
+		object = kmem_cache_alloc_noprof(object_cache,
+						 gfp_nested_mask(gfp));
 		if (object)
 			return object;
 	}
@@ -947,7 +942,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
 
 	if (scan_area_cache)
-		area = kmem_cache_alloc_noprof(scan_area_cache, gfp_kmemleak_mask(gfp));
+		area = kmem_cache_alloc_noprof(scan_area_cache,
					       gfp_nested_mask(gfp));
 
 	raw_spin_lock_irqsave(&object->lock, flags);
 	if (!area) {
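For kmemleak this is a pure lift-and-rename; expanding both definitions shows the old macro and the new helper compute the identical mask, so kmemleak's allocation behavior is unchanged:

	gfp_kmemleak_mask(gfp)
	    == ((gfp) & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP))
	       | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
	    == gfp_nested_mask(gfp)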
diff --git a/mm/page_owner.c b/mm/page_owner.c
@@ -168,13 +168,8 @@ static void add_stack_record_to_list(struct stack_record *stack_record,
 	unsigned long flags;
 	struct stack *stack;
 
-	/* Filter gfp_mask the same way stackdepot does, for consistency */
-	gfp_mask &= ~GFP_ZONEMASK;
-	gfp_mask &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
-	gfp_mask |= __GFP_NOWARN;
-
 	set_current_in_page_owner();
-	stack = kmalloc(sizeof(*stack), gfp_mask);
+	stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask));
 	if (!stack) {
 		unset_current_in_page_owner();
 		return;
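Note that for page-owner this is a fix as well as a cleanup: the old open-coded filter only ORed in __GFP_NOWARN, so switching to gfp_nested_mask() additionally sets __GFP_NORETRY and __GFP_NOMEMALLOC, making the nested kmalloc() fail fast and keeping it out of the emergency reserves.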