mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 04:38:03 +00:00
debugobjects: Don't destroy kmem cache in init()
debug_objects_mem_init() is invoked from mm_core_init() before work queues are available. If debug_objects_mem_init() destroys the kmem cache in the error path it causes an Oops in __queue_work(): Oops: Oops: 0000 [#1] PREEMPT SMP PTI RIP: 0010:__queue_work+0x35/0x6a0 queue_work_on+0x66/0x70 flush_all_cpus_locked+0xdf/0x1a0 __kmem_cache_shutdown+0x2f/0x340 kmem_cache_destroy+0x4e/0x150 mm_core_init+0x9e/0x120 start_kernel+0x298/0x800 x86_64_start_reservations+0x18/0x30 x86_64_start_kernel+0xc5/0xe0 common_startup_64+0x12c/0x138 Further the object cache pointer is used in various places to check for early boot operation. It is exposed before the replacements for the static boot time objects are allocated and the self test operates on it. This can be avoided by: 1) Running the self test with the static boot objects 2) Exposing it only after the replacement objects have been added to the pool. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Link: https://lore.kernel.org/all/20241007164913.137021337@linutronix.de
This commit is contained in:
parent
813fd07858
commit
55fb412ef7
@ -1211,7 +1211,7 @@ static __initconst const struct debug_obj_descr descr_type_test = {
|
|||||||
|
|
||||||
static __initdata struct self_test obj = { .static_init = 0 };
|
static __initdata struct self_test obj = { .static_init = 0 };
|
||||||
|
|
||||||
static void __init debug_objects_selftest(void)
|
static bool __init debug_objects_selftest(void)
|
||||||
{
|
{
|
||||||
int fixups, oldfixups, warnings, oldwarnings;
|
int fixups, oldfixups, warnings, oldwarnings;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
@ -1280,9 +1280,10 @@ static void __init debug_objects_selftest(void)
|
|||||||
descr_test = NULL;
|
descr_test = NULL;
|
||||||
|
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
|
return !!debug_objects_enabled;
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
static inline void debug_objects_selftest(void) { }
|
static inline bool debug_objects_selftest(void) { return true; }
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -1302,18 +1303,21 @@ void __init debug_objects_early_init(void)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Convert the statically allocated objects to dynamic ones:
|
* Convert the statically allocated objects to dynamic ones.
|
||||||
|
* debug_objects_mem_init() is called early so only one CPU is up and
|
||||||
|
* interrupts are disabled, which means it is safe to replace the active
|
||||||
|
* object references.
|
||||||
*/
|
*/
|
||||||
static int __init debug_objects_replace_static_objects(void)
|
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
|
||||||
{
|
{
|
||||||
struct debug_bucket *db = obj_hash;
|
struct debug_bucket *db = obj_hash;
|
||||||
struct hlist_node *tmp;
|
|
||||||
struct debug_obj *obj, *new;
|
struct debug_obj *obj, *new;
|
||||||
|
struct hlist_node *tmp;
|
||||||
HLIST_HEAD(objects);
|
HLIST_HEAD(objects);
|
||||||
int i, cnt = 0;
|
int i, cnt = 0;
|
||||||
|
|
||||||
for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
|
for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
|
||||||
obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
|
obj = kmem_cache_zalloc(cache, GFP_KERNEL);
|
||||||
if (!obj)
|
if (!obj)
|
||||||
goto free;
|
goto free;
|
||||||
hlist_add_head(&obj->node, &objects);
|
hlist_add_head(&obj->node, &objects);
|
||||||
@ -1321,12 +1325,6 @@ static int __init debug_objects_replace_static_objects(void)
|
|||||||
|
|
||||||
debug_objects_allocated += i;
|
debug_objects_allocated += i;
|
||||||
|
|
||||||
/*
|
|
||||||
* debug_objects_mem_init() is now called early that only one CPU is up
|
|
||||||
* and interrupts have been disabled, so it is safe to replace the
|
|
||||||
* active object references.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Replace the statically allocated objects list with the allocated
|
* Replace the statically allocated objects list with the allocated
|
||||||
* objects list.
|
* objects list.
|
||||||
@ -1347,15 +1345,14 @@ static int __init debug_objects_replace_static_objects(void)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pr_debug("%d of %d active objects replaced\n",
|
pr_debug("%d of %d active objects replaced\n", cnt, obj_pool_used);
|
||||||
cnt, obj_pool_used);
|
return true;
|
||||||
return 0;
|
|
||||||
free:
|
free:
|
||||||
hlist_for_each_entry_safe(obj, tmp, &objects, node) {
|
hlist_for_each_entry_safe(obj, tmp, &objects, node) {
|
||||||
hlist_del(&obj->node);
|
hlist_del(&obj->node);
|
||||||
kmem_cache_free(obj_cache, obj);
|
kmem_cache_free(cache, obj);
|
||||||
}
|
}
|
||||||
return -ENOMEM;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -1366,6 +1363,7 @@ static int __init debug_objects_replace_static_objects(void)
|
|||||||
*/
|
*/
|
||||||
void __init debug_objects_mem_init(void)
|
void __init debug_objects_mem_init(void)
|
||||||
{
|
{
|
||||||
|
struct kmem_cache *cache;
|
||||||
int cpu, extras;
|
int cpu, extras;
|
||||||
|
|
||||||
if (!debug_objects_enabled)
|
if (!debug_objects_enabled)
|
||||||
@ -1380,29 +1378,33 @@ void __init debug_objects_mem_init(void)
|
|||||||
for_each_possible_cpu(cpu)
|
for_each_possible_cpu(cpu)
|
||||||
INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
|
INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
|
||||||
|
|
||||||
obj_cache = kmem_cache_create("debug_objects_cache",
|
if (!debug_objects_selftest())
|
||||||
sizeof (struct debug_obj), 0,
|
|
||||||
SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
|
|
||||||
NULL);
|
|
||||||
|
|
||||||
if (!obj_cache || debug_objects_replace_static_objects()) {
|
|
||||||
debug_objects_enabled = 0;
|
|
||||||
kmem_cache_destroy(obj_cache);
|
|
||||||
pr_warn("out of memory.\n");
|
|
||||||
return;
|
return;
|
||||||
} else
|
|
||||||
debug_objects_selftest();
|
|
||||||
|
|
||||||
#ifdef CONFIG_HOTPLUG_CPU
|
cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0,
|
||||||
cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
|
SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);
|
||||||
object_cpu_offline);
|
|
||||||
#endif
|
if (!cache || !debug_objects_replace_static_objects(cache)) {
|
||||||
|
debug_objects_enabled = 0;
|
||||||
|
pr_warn("Out of memory.\n");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Increase the thresholds for allocating and freeing objects
|
* Adjust the thresholds for allocating and freeing objects
|
||||||
* according to the number of possible CPUs available in the system.
|
* according to the number of possible CPUs available in the
|
||||||
|
* system.
|
||||||
*/
|
*/
|
||||||
extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
|
extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
|
||||||
debug_objects_pool_size += extras;
|
debug_objects_pool_size += extras;
|
||||||
debug_objects_pool_min_level += extras;
|
debug_objects_pool_min_level += extras;
|
||||||
|
|
||||||
|
/* Everything worked. Expose the cache */
|
||||||
|
obj_cache = cache;
|
||||||
|
|
||||||
|
#ifdef CONFIG_HOTPLUG_CPU
|
||||||
|
cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
|
||||||
|
object_cpu_offline);
|
||||||
|
#endif
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user