sched/fair: Fix external p->on_rq users
Sean noted that ever since commit 152e11f6df ("sched/fair: Implement delayed dequeue"),
KVM's preemption notifiers have started mis-classifying preemption vs blocking.

Notably, p->on_rq is no longer sufficient to determine if a task is runnable or
blocked -- the aforementioned commit introduces tasks that remain on the runqueue
even though they will not run again, and should be considered blocked for many
cases.

Add the task_is_runnable() helper to classify things and audit all external users
of the p->on_rq state. Also add a few comments.

Fixes: 152e11f6df ("sched/fair: Implement delayed dequeue")
Reported-by: Sean Christopherson <seanjc@google.com>
Tested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20241010091843.GK33184@noisy.programming.kicks-ass.net
This commit is contained in:
parent c650812419
commit cd9626e9eb
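For readers outside the scheduler, the new check is easy to model in isolation. The
following is a minimal, self-contained C sketch -- not kernel code; the struct and
field names only mimic task_struct::on_rq and sched_entity::sched_delayed -- showing
why p->on_rq alone mis-classifies a delay-dequeued task as preempted, while the
on_rq && !sched_delayed combination used by task_is_runnable() does not.

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Toy model (not kernel code): the two fields the patch combines when
	 * classifying a task. A "delayed dequeue" task keeps on_rq set even
	 * though it will not run again and is effectively blocked.
	 */
	struct task {
		int  on_rq;          /* still enqueued on a runqueue */
		bool sched_delayed;  /* dequeue deferred; will not run again */
	};

	/* Mirrors the new helper: runnable means on the runqueue AND not delayed. */
	static bool task_is_runnable(const struct task *p)
	{
		return p->on_rq && !p->sched_delayed;
	}

	int main(void)
	{
		struct task preempted = { .on_rq = 1, .sched_delayed = false };
		struct task delayed   = { .on_rq = 1, .sched_delayed = true  };

		/* Checking only on_rq would classify both tasks as preempted. */
		printf("preempted task runnable: %d\n", task_is_runnable(&preempted)); /* 1 */
		printf("delayed task runnable:   %d\n", task_is_runnable(&delayed));   /* 0 */
		return 0;
	}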
@@ -2133,6 +2133,11 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 #endif /* CONFIG_SMP */
 
+static inline bool task_is_runnable(struct task_struct *p)
+{
+	return p->on_rq && !p->se.sched_delayed;
+}
+
 extern bool sched_task_on_rq(struct task_struct *p);
 extern unsigned long get_wchan(struct task_struct *p);
 extern struct task_struct *cpu_curr_snapshot(int cpu);
@@ -9251,7 +9251,7 @@ static void perf_event_switch(struct task_struct *task,
 		},
 	};
 
-	if (!sched_in && task->on_rq) {
+	if (!sched_in && task_is_runnable(task)) {
 		switch_event.event_id.header.misc |=
 				PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
 	}
@@ -109,7 +109,12 @@ static int __set_task_frozen(struct task_struct *p, void *arg)
 {
 	unsigned int state = READ_ONCE(p->__state);
 
-	if (p->on_rq)
+	/*
+	 * Allow freezing the sched_delayed tasks; they will not execute until
+	 * ttwu() fixes them up, so it is safe to swap their state now, instead
+	 * of waiting for them to get fully dequeued.
+	 */
+	if (task_is_runnable(p))
 		return 0;
 
 	if (p != current && task_curr(p))
@@ -985,6 +985,15 @@ static bool rcu_tasks_is_holdout(struct task_struct *t)
 	if (!READ_ONCE(t->on_rq))
 		return false;
 
+	/*
+	 * t->on_rq && !t->se.sched_delayed *could* be considered sleeping but
+	 * since it is a spurious state (it will transition into the
+	 * traditional blocked state or get woken up without outside
+	 * dependencies), not considering it such should only affect timing.
+	 *
+	 * Be conservative for now and not include it.
+	 */
+
 	/*
 	 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
 	 * quiescent states. But CPU boot code performed by the idle task
@@ -548,6 +548,11 @@ sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
  * ON_RQ_MIGRATING state is used for migration without holding both
  * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
  *
+ * Additionally it is possible to be ->on_rq but still be considered not
+ * runnable when p->se.sched_delayed is true. These tasks are on the runqueue
+ * but will be dequeued as soon as they get picked again. See the
+ * task_is_runnable() helper.
+ *
  * p->on_cpu <- { 0, 1 }:
  *
  * is set by prepare_task() and cleared by finish_task() such that it will be
@@ -4317,9 +4322,10 @@ static bool __task_needs_rq_lock(struct task_struct *p)
  * @arg: Argument to function.
  *
  * Fix the task in it's current state by avoiding wakeups and or rq operations
- * and call @func(@arg) on it. This function can use ->on_rq and task_curr()
- * to work out what the state is, if required. Given that @func can be invoked
- * with a runqueue lock held, it had better be quite lightweight.
+ * and call @func(@arg) on it. This function can use task_is_runnable() and
+ * task_curr() to work out what the state is, if required. Given that @func
+ * can be invoked with a runqueue lock held, it had better be quite
+ * lightweight.
  *
  * Returns:
  * Whatever @func returns
@@ -434,6 +434,12 @@ static void tick_nohz_kick_task(struct task_struct *tsk)
	 *   smp_mb__after_spin_lock()
	 *   tick_nohz_task_switch()
	 *     LOAD p->tick_dep_mask
+	 *
+	 * XXX given a task picks up the dependency on schedule(), should we
+	 * only care about tasks that are currently on the CPU instead of all
+	 * that are on the runqueue?
+	 *
+	 * That is, does this want to be: task_on_cpu() / task_curr()?
	 */
	if (!sched_task_on_rq(tsk))
		return;
@@ -1485,7 +1485,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	/* reset the max latency */
 	tr->max_latency = 0;
 
-	while (p->on_rq) {
+	while (task_is_runnable(p)) {
 		/*
 		 * Sleep to make sure the -deadline thread is asleep too.
 		 * On virtual machines we can't rely on timings,
@@ -6387,7 +6387,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
 
 	WRITE_ONCE(vcpu->scheduled_out, true);
 
-	if (current->on_rq && vcpu->wants_to_run) {
+	if (task_is_runnable(current) && vcpu->wants_to_run) {
 		WRITE_ONCE(vcpu->preempted, true);
 		WRITE_ONCE(vcpu->ready, true);
 	}