kfence: save freeing stack trace at calling time instead of freeing time

For kmem_cache with SLAB_TYPESAFE_BY_RCU, the stack trace recorded when
kmem_cache_free() is called is the useful one, while the following stack,
captured when the RCU callback actually frees the object, is meaningless
and provides no help:
  freed by task 46 on cpu 0 at 656.840729s:
   rcu_do_batch+0x1ab/0x540
   nocb_cb_wait+0x8f/0x260
   rcu_nocb_cb_kthread+0x25/0x80
   kthread+0xd2/0x100
   ret_from_fork+0x34/0x50
   ret_from_fork_asm+0x1a/0x30
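
A minimal sketch of such a cache (hypothetical names, not part of this
patch): with SLAB_TYPESAFE_BY_RCU the object is only queued for freeing
at kmem_cache_free() time, so the caller's stack is the one worth saving:

  struct foo {
          int x;
  };

  static struct kmem_cache *foo_cache;

  static int foo_init(void)
  {
          /* Actual freeing is deferred until after an RCU grace period. */
          foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
                                        SLAB_TYPESAFE_BY_RCU, NULL);
          return foo_cache ? 0 : -ENOMEM;
  }

  static void foo_release(struct foo *f)
  {
          /*
           * This is the call site a KFENCE report should point at; the
           * object is physically freed later, from RCU callback context.
           */
          kmem_cache_free(foo_cache, f);
  }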

Link: https://lkml.kernel.org/r/20240812095517.2357-1-dtcccc@linux.alibaba.com
Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

commit c36be0cdf6 (parent c64d66153b)
Author: Tianchen Ding, 2024-08-12 17:55:17 +08:00; committed by Andrew Morton
3 changed files with 34 additions and 13 deletions

--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -273,6 +273,13 @@ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *m
         return pageaddr;
 }
 
+static inline bool kfence_obj_allocated(const struct kfence_metadata *meta)
+{
+        enum kfence_object_state state = READ_ONCE(meta->state);
+
+        return state == KFENCE_OBJECT_ALLOCATED || state == KFENCE_OBJECT_RCU_FREEING;
+}
+
 /*
  * Update the object's metadata state, including updating the alloc/free stacks
  * depending on the state transition.
@@ -282,10 +289,14 @@ metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state nex
                       unsigned long *stack_entries, size_t num_stack_entries)
 {
         struct kfence_track *track =
-                next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
+                next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track;
 
         lockdep_assert_held(&meta->lock);
 
+        /* Stack has been saved when calling rcu, skip. */
+        if (READ_ONCE(meta->state) == KFENCE_OBJECT_RCU_FREEING)
+                goto out;
+
         if (stack_entries) {
                 memcpy(track->stack_entries, stack_entries,
                        num_stack_entries * sizeof(stack_entries[0]));
@@ -301,6 +312,7 @@ metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state nex
         track->cpu = raw_smp_processor_id();
         track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
 
+out:
         /*
          * Pairs with READ_ONCE() in
          *      kfence_shutdown_cache(),
@@ -506,7 +518,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
         raw_spin_lock_irqsave(&meta->lock, flags);
 
-        if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
+        if (!kfence_obj_allocated(meta) || meta->addr != (unsigned long)addr) {
                 /* Invalid or double-free, bail out. */
                 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
                 kfence_report_error((unsigned long)addr, false, NULL, meta,
@@ -784,7 +796,7 @@ static void kfence_check_all_canary(void)
         for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                 struct kfence_metadata *meta = &kfence_metadata[i];
 
-                if (meta->state == KFENCE_OBJECT_ALLOCATED)
+                if (kfence_obj_allocated(meta))
                         check_canary(meta);
         }
 }
@@ -1010,12 +1022,11 @@ void kfence_shutdown_cache(struct kmem_cache *s)
                  * the lock will not help, as different critical section
                  * serialization will have the same outcome.
                  */
-                if (READ_ONCE(meta->cache) != s ||
-                    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
+                if (READ_ONCE(meta->cache) != s || !kfence_obj_allocated(meta))
                         continue;
 
                 raw_spin_lock_irqsave(&meta->lock, flags);
-                in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
+                in_use = meta->cache == s && kfence_obj_allocated(meta);
                 raw_spin_unlock_irqrestore(&meta->lock, flags);
 
                 if (in_use) {
@@ -1160,11 +1171,19 @@ void __kfence_free(void *addr)
          * the object, as the object page may be recycled for other-typed
          * objects once it has been freed. meta->cache may be NULL if the cache
          * was destroyed.
+         * Save the stack trace here so that reports show where the user freed
+         * the object.
          */
-        if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
+        if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) {
+                unsigned long flags;
+
+                raw_spin_lock_irqsave(&meta->lock, flags);
+                metadata_update_state(meta, KFENCE_OBJECT_RCU_FREEING, NULL, 0);
+                raw_spin_unlock_irqrestore(&meta->lock, flags);
                 call_rcu(&meta->rcu_head, rcu_guarded_free);
-        else
+        } else {
                 kfence_guarded_free(addr, meta, false);
+        }
 }
 
 bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
@@ -1188,14 +1207,14 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
                 int distance = 0;
 
                 meta = addr_to_metadata(addr - PAGE_SIZE);
-                if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
+                if (meta && kfence_obj_allocated(meta)) {
                         to_report = meta;
                         /* Data race ok; distance calculation approximate. */
                         distance = addr - data_race(meta->addr + meta->size);
                 }
 
                 meta = addr_to_metadata(addr + PAGE_SIZE);
-                if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
+                if (meta && kfence_obj_allocated(meta)) {
                         /* Data race ok; distance calculation approximate. */
                         if (!to_report || distance > data_race(meta->addr) - addr)
                                 to_report = meta;
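
Condensed from the hunks above, the new two-phase free path for
SLAB_TYPESAFE_BY_RCU caches is:

  __kfence_free()
    metadata_update_state(meta, KFENCE_OBJECT_RCU_FREEING, NULL, 0)
      /* saves the caller's stack into meta->free_track */
    call_rcu(&meta->rcu_head, rcu_guarded_free)
      ... RCU grace period ...
    rcu_guarded_free() -> kfence_guarded_free()
      metadata_update_state(meta, KFENCE_OBJECT_FREED, ...)
        /* sees KFENCE_OBJECT_RCU_FREEING and skips overwriting the stack */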

--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -38,6 +38,7 @@
 enum kfence_object_state {
         KFENCE_OBJECT_UNUSED,           /* Object is unused. */
         KFENCE_OBJECT_ALLOCATED,        /* Object is currently allocated. */
+        KFENCE_OBJECT_RCU_FREEING,      /* Object was allocated, and then being freed by rcu. */
         KFENCE_OBJECT_FREED,            /* Object was allocated, and then freed. */
 };
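
An illustrative helper (not part of the patch) encoding the legal state
transitions after this change; KFENCE_OBJECT_RCU_FREEING is entered only
from KFENCE_OBJECT_ALLOCATED, and only for SLAB_TYPESAFE_BY_RCU caches:

  static bool state_transition_valid(enum kfence_object_state from,
                                     enum kfence_object_state to)
  {
          switch (to) {
          case KFENCE_OBJECT_ALLOCATED:
                  /* A fresh slot, or a freed slot being recycled. */
                  return from == KFENCE_OBJECT_UNUSED || from == KFENCE_OBJECT_FREED;
          case KFENCE_OBJECT_RCU_FREEING:
                  /* kmem_cache_free() on a SLAB_TYPESAFE_BY_RCU object. */
                  return from == KFENCE_OBJECT_ALLOCATED;
          case KFENCE_OBJECT_FREED:
                  /* A direct free, or the RCU callback completing one. */
                  return from == KFENCE_OBJECT_ALLOCATED ||
                         from == KFENCE_OBJECT_RCU_FREEING;
          default:
                  return false;
          }
  }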

--- a/mm/kfence/report.c
+++ b/mm/kfence/report.c
@@ -114,7 +114,8 @@ static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadat
         /* Timestamp matches printk timestamp format. */
         seq_con_printf(seq, "%s by task %d on cpu %d at %lu.%06lus (%lu.%06lus ago):\n",
-                       show_alloc ? "allocated" : "freed", track->pid,
+                       show_alloc ? "allocated" : meta->state == KFENCE_OBJECT_RCU_FREEING ?
+                       "rcu freeing" : "freed", track->pid,
                        track->cpu, (unsigned long)ts_sec, rem_nsec / 1000,
                        (unsigned long)interval_nsec, rem_interval_nsec / 1000);
@@ -149,7 +150,7 @@ void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *met
         kfence_print_stack(seq, meta, true);
 
-        if (meta->state == KFENCE_OBJECT_FREED) {
+        if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING) {
                 seq_con_printf(seq, "\n");
                 kfence_print_stack(seq, meta, false);
         }
@@ -318,7 +319,7 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla
         kpp->kp_slab_cache = meta->cache;
         kpp->kp_objp = (void *)meta->addr;
         kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
-        if (meta->state == KFENCE_OBJECT_FREED)
+        if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING)
                 kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
         /* get_stack_skipnr() ensures the first entry is outside allocator. */
         kpp->kp_ret = kpp->kp_stack[0];
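
With this change, the free stack in a report for an object still queued
for RCU freeing would read roughly as follows (hypothetical task, cpu,
and timestamps; frames elided):

  rcu freeing by task 46 on cpu 0 at 656.840729s (0.000120s ago):
   kmem_cache_free+0x...
   ...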