From 9c602adb799e72ee537c0c7ca7e828c3fe2acad6 Mon Sep 17 00:00:00 2001 From: Huang Shijie Date: Thu, 29 Aug 2024 11:11:11 +0800 Subject: [PATCH 01/10] sched/deadline: Fix schedstats vs deadline servers In dl_server_start(), when schedstats is enabled, the following happens: dl_server_start() dl_se->dl_server = 1; enqueue_dl_entity() update_stats_enqueue_dl() __schedstats_from_dl_se() dl_task_of() BUG_ON(dl_server(dl_se)); Since only tasks have schedstats and internal entries do not, avoid trying to update stats in this case. Fixes: 63ba8422f876 ("sched/deadline: Introduce deadline servers") Signed-off-by: Huang Shijie Signed-off-by: Peter Zijlstra (Intel) Acked-by: Juri Lelli Link: https://lkml.kernel.org/r/20240829031111.12142-1-shijie@os.amperecomputing.com --- kernel/sched/deadline.c | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 0f2df67f710b..2e84037b634f 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1896,46 +1896,40 @@ static inline bool __dl_less(struct rb_node *a, const struct rb_node *b) return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline); } -static inline struct sched_statistics * +static __always_inline struct sched_statistics * __schedstats_from_dl_se(struct sched_dl_entity *dl_se) { + if (!schedstat_enabled()) + return NULL; + + if (dl_server(dl_se)) + return NULL; + return &dl_task_of(dl_se)->stats; } static inline void update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) { - struct sched_statistics *stats; - - if (!schedstat_enabled()) - return; - - stats = __schedstats_from_dl_se(dl_se); - __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); + struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); + if (stats) + __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); } static inline void update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) { - struct sched_statistics *stats; - - if (!schedstat_enabled()) - return; - - stats = __schedstats_from_dl_se(dl_se); - __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); + struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); + if (stats) + __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); } static inline void update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) { - struct sched_statistics *stats; - - if (!schedstat_enabled()) - return; - - stats = __schedstats_from_dl_se(dl_se); - __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); + struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); + if (stats) + __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); } static inline void From 75b6499024a6c1a4ef0288f280534a5c54269076 Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Thu, 29 Aug 2024 15:53:53 +0200 Subject: [PATCH 02/10] sched/fair: Properly deactivate sched_delayed task upon class change __sched_setscheduler() goes through an enqueue/dequeue cycle like so: flags := DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; prev_class->dequeue_task(rq, p, flags); new_class->enqueue_task(rq, p, flags); when prev_class := fair_sched_class, this is followed by: dequeue_task(rq, p, DEQUEUE_NOCLOCK | DEQUEUE_SLEEP); the idea being that since the task has switched classes, we need to drop the sched_delayed logic and have that task be deactivated 
per its previous dequeue_task(..., DEQUEUE_SLEEP). Unfortunately, this leaves the task on_rq. This is missing the tail end of dequeue_entities() that issues __block_task(), which __sched_setscheduler() won't have done due to not using DEQUEUE_DELAYED - not that it should, as it is pretty much a fair_sched_class specific thing. Make switched_from_fair() properly deactivate sched_delayed tasks upon class changes via __block_task(), as if a dequeue_task(..., DEQUEUE_DELAYED) had been issued. Fixes: 2e0199df252a ("sched/fair: Prepare exit/cleanup paths for delayed_dequeue") Reported-by: "Paul E. McKenney" Reported-by: Chen Yu Signed-off-by: Valentin Schneider Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20240829135353.1524260-1-vschneid@redhat.com --- kernel/sched/fair.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fea057b311f6..3a3286df282f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5456,6 +5456,13 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); +static inline void finish_delayed_dequeue_entity(struct sched_entity *se) +{ + se->sched_delayed = 0; + if (sched_feat(DELAY_ZERO) && se->vlag > 0) + se->vlag = 0; +} + static bool dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { @@ -5531,11 +5538,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) update_min_vruntime(cfs_rq); - if (flags & DEQUEUE_DELAYED) { - se->sched_delayed = 0; - if (sched_feat(DELAY_ZERO) && se->vlag > 0) - se->vlag = 0; - } + if (flags & DEQUEUE_DELAYED) + finish_delayed_dequeue_entity(se); if (cfs_rq->nr_running == 0) update_idle_cfs_rq_clock_pelt(cfs_rq); @@ -13107,11 +13111,16 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p) * and we cannot use DEQUEUE_DELAYED. */ if (p->se.sched_delayed) { + /* First, dequeue it from its new class' structures */ dequeue_task(rq, p, DEQUEUE_NOCLOCK | DEQUEUE_SLEEP); - p->se.sched_delayed = 0; + /* + * Now, clean up the fair_sched_class side of things + * related to sched_delayed being true and that wasn't done + * due to the generic dequeue not using DEQUEUE_DELAYED. + */ + finish_delayed_dequeue_entity(&p->se); p->se.rel_deadline = 0; - if (sched_feat(DELAY_ZERO) && p->se.vlag > 0) - p->se.vlag = 0; + __block_task(rq, p); } } From 7d2180d9d943d31491d77e336557f33670cfe7fd Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 14 Aug 2024 00:25:49 +0200 Subject: [PATCH 03/10] sched: Use set_next_task(.first) where required Turns out the core_sched bits forgot to use the set_next_task(.first=true) variant. 
Notably:

	pick_next_task() := pick_task() + set_next_task(.first = true)

Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20240813224015.614146342@infradead.org
---
 kernel/sched/core.c  | 4 ++--
 kernel/sched/sched.h | 4 ++++
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 016581168cb8..406b794f8423 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6010,7 +6010,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		next = rq->core_pick;
 		if (next != prev) {
 			put_prev_task(rq, prev);
-			set_next_task(rq, next);
+			set_next_task_first(rq, next);
 		}
 
 		rq->core_pick = NULL;
@@ -6184,7 +6184,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	}
 
 out_set_next:
-	set_next_task(rq, next);
+	set_next_task_first(rq, next);
 out:
 	if (rq->core->core_forceidle_count && next == rq->idle)
 		queue_core_balance(rq);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 2f5d658c0631..d33311d6a738 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2363,6 +2363,10 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
 	next->sched_class->set_next_task(rq, next, false);
 }
 
+static inline void set_next_task_first(struct rq *rq, struct task_struct *next)
+{
+	next->sched_class->set_next_task(rq, next, true);
+}
 
 /*
  * Helper to define a sched_class instance; each one is placed in a separate

From dae4320b29f0bbdae93f7c1f6f80b19f109ca0bc Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 14 Aug 2024 00:25:50 +0200
Subject: [PATCH 04/10] sched: Fixup set_next_task() implementations

The rule is that:

	pick_next_task() := pick_task() + set_next_task(.first = true)

Turns out there are still a few things in pick_next_task() that are
missing from that combination.
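To make the rule concrete, here is a freestanding C sketch of the
identity (toy 'rq' and 'task' types with hypothetical names, not the
kernel's structures):

    #include <stdio.h>

    struct task { const char *comm; };

    struct rq {
        struct task *queued;    /* toy stand-in for the runqueue */
        struct task *curr;
    };

    /* pick_task(): choose a candidate without committing to it. */
    static struct task *pick_task(struct rq *rq)
    {
        return rq->queued;
    }

    /*
     * set_next_task(.first = true): commit to the pick. The first-pick
     * work (hrtick, misfit status, stop-tick in the kernel) is exactly
     * what open-coded pick_next_task() implementations must not drop.
     */
    static void set_next_task(struct rq *rq, struct task *p, int first)
    {
        rq->curr = p;
        if (first)
            printf("first-pick work for %s\n", p->comm);
    }

    /* The rule: pick_next_task() := pick_task() + set_next_task(.first = true) */
    static struct task *pick_next_task(struct rq *rq)
    {
        struct task *p = pick_task(rq);

        if (p)
            set_next_task(rq, p, 1);
        return p;
    }

    int main(void)
    {
        struct task t = { "demo" };
        struct rq rq = { &t, NULL };

        pick_next_task(&rq);
        return 0;
    }

The changes below move precisely this first-pick work into the
set_next_task() side so the identity holds for the open-coded paths.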
Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240813224015.724111109@infradead.org --- kernel/sched/deadline.c | 6 ++-- kernel/sched/fair.c | 62 ++++++++++++++++++++--------------------- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 2e84037b634f..f7ac7cff9b5e 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2386,6 +2386,9 @@ static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first) update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); deadline_queue_push_tasks(rq); + + if (hrtick_enabled(rq)) + start_hrtick_dl(rq, &p->dl); } static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq) @@ -2452,9 +2455,6 @@ static struct task_struct *pick_next_task_dl(struct rq *rq) if (!p->dl_server) set_next_task_dl(rq, p, true); - if (hrtick_enabled(rq)) - start_hrtick_dl(rq, &p->dl); - return p; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3a3286df282f..eaeb8b2ad834 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8760,6 +8760,9 @@ static struct task_struct *pick_task_fair(struct rq *rq) return task_of(se); } +static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first); +static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first); + struct task_struct * pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { @@ -8808,33 +8811,17 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf put_prev_entity(cfs_rq, pse); set_next_entity(cfs_rq, se); + + __set_next_task_fair(rq, p, true); } - goto done; + return p; + simple: #endif if (prev) put_prev_task(rq, prev); - - for_each_sched_entity(se) - set_next_entity(cfs_rq_of(se), se); - -done: __maybe_unused; -#ifdef CONFIG_SMP - /* - * Move the next running task to the front of - * the list, so our cfs_tasks list becomes MRU - * one. - */ - list_move(&p->se.group_node, &rq->cfs_tasks); -#endif - - if (hrtick_enabled_fair(rq)) - hrtick_start_fair(rq, p); - - update_misfit_status(p, rq); - sched_fair_update_stop_tick(rq, p); - + set_next_task_fair(rq, p, true); return p; idle: @@ -13145,12 +13132,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p) } } -/* Account for a task changing its policy or group. - * - * This routine is mostly called to set cfs_rq->curr field when a task - * migrates between groups/classes. - */ -static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) +static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) { struct sched_entity *se = &p->se; @@ -13163,6 +13145,27 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) list_move(&se->group_node, &rq->cfs_tasks); } #endif + if (!first) + return; + + SCHED_WARN_ON(se->sched_delayed); + + if (hrtick_enabled_fair(rq)) + hrtick_start_fair(rq, p); + + update_misfit_status(p, rq); + sched_fair_update_stop_tick(rq, p); +} + +/* + * Account for a task changing its policy or group. + * + * This routine is mostly called to set cfs_rq->curr field when a task + * migrates between groups/classes. 
+ */ +static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) +{ + struct sched_entity *se = &p->se; for_each_sched_entity(se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); @@ -13172,10 +13175,7 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) account_cfs_rq_runtime(cfs_rq, 0); } - if (!first) - return; - - SCHED_WARN_ON(se->sched_delayed); + __set_next_task_fair(rq, p, first); } void init_cfs_rq(struct cfs_rq *cfs_rq) From 4686cc598f669dea1b50dde1568e6c65c355bc67 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 14 Aug 2024 00:25:51 +0200 Subject: [PATCH 05/10] sched: Clean up DL server vs core sched Abide by the simple rule: pick_next_task() := pick_task() + set_next_task(.first = true) This allows us to trivially get rid of server_pick_next() and things collapse nicely. Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240813224015.837303391@infradead.org --- include/linux/sched.h | 1 - kernel/sched/deadline.c | 18 +++++++----------- kernel/sched/fair.c | 13 +------------ kernel/sched/sched.h | 1 - 4 files changed, 8 insertions(+), 25 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 3709dedbab59..57cf27a3045c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -692,7 +692,6 @@ struct sched_dl_entity { */ struct rq *rq; dl_server_has_tasks_f server_has_tasks; - dl_server_pick_f server_pick_next; dl_server_pick_f server_pick_task; #ifdef CONFIG_RT_MUTEXES diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index f7ac7cff9b5e..2ea929ce9e4f 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1665,12 +1665,10 @@ void dl_server_stop(struct sched_dl_entity *dl_se) void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, dl_server_has_tasks_f has_tasks, - dl_server_pick_f pick_next, dl_server_pick_f pick_task) { dl_se->rq = rq; dl_se->server_has_tasks = has_tasks; - dl_se->server_pick_next = pick_next; dl_se->server_pick_task = pick_task; } @@ -2404,9 +2402,8 @@ static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq) /* * __pick_next_task_dl - Helper to pick the next -deadline task to run. * @rq: The runqueue to pick the next task from. - * @peek: If true, just peek at the next task. Only relevant for dlserver. 
*/ -static struct task_struct *__pick_next_task_dl(struct rq *rq, bool peek) +static struct task_struct *__pick_task_dl(struct rq *rq) { struct sched_dl_entity *dl_se; struct dl_rq *dl_rq = &rq->dl; @@ -2420,10 +2417,7 @@ static struct task_struct *__pick_next_task_dl(struct rq *rq, bool peek) WARN_ON_ONCE(!dl_se); if (dl_server(dl_se)) { - if (IS_ENABLED(CONFIG_SMP) && peek) - p = dl_se->server_pick_task(dl_se); - else - p = dl_se->server_pick_next(dl_se); + p = dl_se->server_pick_task(dl_se); if (!p) { dl_se->dl_yielded = 1; update_curr_dl_se(rq, dl_se, 0); @@ -2440,7 +2434,7 @@ static struct task_struct *__pick_next_task_dl(struct rq *rq, bool peek) #ifdef CONFIG_SMP static struct task_struct *pick_task_dl(struct rq *rq) { - return __pick_next_task_dl(rq, true); + return __pick_task_dl(rq); } #endif @@ -2448,11 +2442,13 @@ static struct task_struct *pick_next_task_dl(struct rq *rq) { struct task_struct *p; - p = __pick_next_task_dl(rq, false); + p = __pick_task_dl(rq); if (!p) return p; - if (!p->dl_server) + if (p->dl_server) + p->sched_class->set_next_task(rq, p, true); + else set_next_task_dl(rq, p, true); return p; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index eaeb8b2ad834..8379100905b8 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8862,16 +8862,7 @@ static bool fair_server_has_tasks(struct sched_dl_entity *dl_se) static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se) { -#ifdef CONFIG_SMP return pick_task_fair(dl_se->rq); -#else - return NULL; -#endif -} - -static struct task_struct *fair_server_pick_next(struct sched_dl_entity *dl_se) -{ - return pick_next_task_fair(dl_se->rq, NULL, NULL); } void fair_server_init(struct rq *rq) @@ -8880,9 +8871,7 @@ void fair_server_init(struct rq *rq) init_dl_entity(dl_se); - dl_server_init(dl_se, rq, fair_server_has_tasks, fair_server_pick_next, - fair_server_pick_task); - + dl_server_init(dl_se, rq, fair_server_has_tasks, fair_server_pick_task); } /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d33311d6a738..80605e2da483 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -362,7 +362,6 @@ extern void dl_server_start(struct sched_dl_entity *dl_se); extern void dl_server_stop(struct sched_dl_entity *dl_se); extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, dl_server_has_tasks_f has_tasks, - dl_server_pick_f pick_next, dl_server_pick_f pick_task); extern void dl_server_update_idle_time(struct rq *rq, From 260598f142c34811d226fdde5ab0346b48181439 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 14 Aug 2024 00:25:52 +0200 Subject: [PATCH 06/10] sched: Split up put_prev_task_balance() With the goal of pushing put_prev_task() after pick_task() / into pick_next_task(). 
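Schematically, as a compilable toy (stand-in types; the real balance
pass walks the sched classes), the split separates the two concerns:

    #include <stdio.h>

    struct rq { const char *prev; };

    static void balance_classes(struct rq *rq) { printf("balance\n"); }
    static void put_prev_task(struct rq *rq)   { printf("put %s\n", rq->prev); }

    /* Before: balancing and putting prev were fused in one helper. */
    static void put_prev_task_balance(struct rq *rq)
    {
        balance_classes(rq);
        put_prev_task(rq);
    }

    /* After: prev_balance() only balances. */
    static void prev_balance(struct rq *rq)
    {
        balance_classes(rq);
    }

    int main(void)
    {
        struct rq rq = { "prev" };

        put_prev_task_balance(&rq);     /* old, fused shape */

        prev_balance(&rq);              /* new shape: put_prev_task() is */
        put_prev_task(&rq);             /* a separate step, free to move */
        return 0;
    }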
Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240813224015.943143811@infradead.org --- kernel/sched/core.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 406b794f8423..36f9bc509ff2 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5841,8 +5841,8 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt) schedstat_inc(this_rq()->sched_count); } -static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, - struct rq_flags *rf) +static void prev_balance(struct rq *rq, struct task_struct *prev, + struct rq_flags *rf) { #ifdef CONFIG_SMP const struct sched_class *class; @@ -5860,8 +5860,6 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, } #endif - put_prev_task(rq, prev); - /* * We've updated @prev and no longer need the server link, clear it. * Must be done before ->pick_next_task() because that can (re)set @@ -5917,7 +5915,8 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } restart: - put_prev_task_balance(rq, prev, rf); + prev_balance(rq, prev, rf); + put_prev_task(rq, prev); for_each_class(class) { p = class->pick_next_task(rq); @@ -6017,7 +6016,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) goto out; } - put_prev_task_balance(rq, prev, rf); + prev_balance(rq, prev, rf); + put_prev_task(rq, prev); smt_mask = cpu_smt_mask(cpu); need_sync = !!rq->core->core_cookie; From fd03c5b8585562d60f8b597b4332d28f48abfe7d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 14 Aug 2024 00:25:53 +0200 Subject: [PATCH 07/10] sched: Rework pick_next_task() The current rule is that: pick_next_task() := pick_task() + set_next_task(.first = true) And many classes implement it directly as such. Change things around to make pick_next_task() optional while also changing the definition to: pick_next_task(prev) := pick_task() + put_prev_task() + set_next_task(.first = true) The reason is that sched_ext would like to have a 'final' call that knows the next task. By placing put_prev_task() right next to set_next_task() (as it already is for sched_core) this becomes trivial. As a bonus, this is a nice cleanup on its own. 
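In userspace miniature (simplified types; the kernel's for_each_class
walks classes in priority order, and its put_prev_task() dispatches via
prev->sched_class), the new dispatch reads:

    #include <stdio.h>
    #include <stddef.h>

    struct rq;
    struct task { const char *comm; };

    struct sched_class {
        struct task *(*pick_task)(struct rq *rq);
        /* Optional: must equal pick_task() + put_prev_task() +
         * set_next_task(.first = true) when implemented. */
        struct task *(*pick_next_task)(struct rq *rq, struct task *prev);
        void (*put_prev_task)(struct rq *rq, struct task *p);
        void (*set_next_task)(struct rq *rq, struct task *p, int first);
    };

    struct rq { struct task *runnable; };

    static struct task *toy_pick_task(struct rq *rq) { return rq->runnable; }
    static void toy_put_prev(struct rq *rq, struct task *p) { printf("put %s\n", p->comm); }
    static void toy_set_next(struct rq *rq, struct task *p, int first)
    {
        printf("set %s, first=%d\n", p->comm, first);
    }

    static const struct sched_class toy_class = {
        .pick_task      = toy_pick_task,
        .pick_next_task = NULL,         /* exercise the generic fallback */
        .put_prev_task  = toy_put_prev,
        .set_next_task  = toy_set_next,
    };

    static const struct sched_class *classes[] = { &toy_class };

    static struct task *__pick_next_task(struct rq *rq, struct task *prev)
    {
        for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
            const struct sched_class *class = classes[i];
            struct task *p;

            if (class->pick_next_task) {
                p = class->pick_next_task(rq, prev);
                if (p)
                    return p;
            } else {
                p = class->pick_task(rq);
                if (p) {
                    /* the kernel uses prev->sched_class here */
                    class->put_prev_task(rq, prev);
                    class->set_next_task(rq, p, 1);
                    return p;
                }
            }
        }
        return NULL;    /* the kernel BUG()s here: idle always picks */
    }

    int main(void)
    {
        struct task prev = { "prev" }, next = { "next" };
        struct rq rq = { &next };

        __pick_next_task(&rq, &prev);
        return 0;
    }

Classes without a fast path need only pick_task(); the generic loop
supplies the put/set pair.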
Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240813224016.051225657@infradead.org --- kernel/sched/core.c | 21 +++++++++++++++------ kernel/sched/deadline.c | 21 +-------------------- kernel/sched/fair.c | 11 +++++------ kernel/sched/idle.c | 16 ++-------------- kernel/sched/rt.c | 13 +------------ kernel/sched/sched.h | 16 ++++++++++++---- kernel/sched/stop_task.c | 13 +------------ 7 files changed, 37 insertions(+), 74 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 36f9bc509ff2..b9429eb5dbbe 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5893,8 +5893,9 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) /* Assume the next prioritized class is idle_sched_class */ if (!p) { + p = pick_task_idle(rq); put_prev_task(rq, prev); - p = pick_next_task_idle(rq); + set_next_task_first(rq, p); } /* @@ -5916,12 +5917,20 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) restart: prev_balance(rq, prev, rf); - put_prev_task(rq, prev); for_each_class(class) { - p = class->pick_next_task(rq); - if (p) - return p; + if (class->pick_next_task) { + p = class->pick_next_task(rq, prev); + if (p) + return p; + } else { + p = class->pick_task(rq); + if (p) { + put_prev_task(rq, prev); + set_next_task_first(rq, p); + return p; + } + } } BUG(); /* The idle class should always have a runnable task. */ @@ -6017,7 +6026,6 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } prev_balance(rq, prev, rf); - put_prev_task(rq, prev); smt_mask = cpu_smt_mask(cpu); need_sync = !!rq->core->core_cookie; @@ -6184,6 +6192,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } out_set_next: + put_prev_task(rq, prev); set_next_task_first(rq, next); out: if (rq->core->core_forceidle_count && next == rq->idle) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 2ea929ce9e4f..a1547e1cd96e 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2431,28 +2431,10 @@ static struct task_struct *__pick_task_dl(struct rq *rq) return p; } -#ifdef CONFIG_SMP static struct task_struct *pick_task_dl(struct rq *rq) { return __pick_task_dl(rq); } -#endif - -static struct task_struct *pick_next_task_dl(struct rq *rq) -{ - struct task_struct *p; - - p = __pick_task_dl(rq); - if (!p) - return p; - - if (p->dl_server) - p->sched_class->set_next_task(rq, p, true); - else - set_next_task_dl(rq, p, true); - - return p; -} static void put_prev_task_dl(struct rq *rq, struct task_struct *p) { @@ -3146,13 +3128,12 @@ DEFINE_SCHED_CLASS(dl) = { .wakeup_preempt = wakeup_preempt_dl, - .pick_next_task = pick_next_task_dl, + .pick_task = pick_task_dl, .put_prev_task = put_prev_task_dl, .set_next_task = set_next_task_dl, #ifdef CONFIG_SMP .balance = balance_dl, - .pick_task = pick_task_dl, .select_task_rq = select_task_rq_dl, .migrate_task_rq = migrate_task_rq_dl, .set_cpus_allowed = set_cpus_allowed_dl, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8379100905b8..53556b08edef 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8777,7 +8777,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf se = &p->se; #ifdef CONFIG_FAIR_GROUP_SCHED - if (!prev || prev->sched_class != &fair_sched_class) + if (prev->sched_class != &fair_sched_class) goto simple; /* @@ -8819,8 +8819,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf simple: #endif - if (prev) - 
put_prev_task(rq, prev); + put_prev_task(rq, prev); set_next_task_fair(rq, p, true); return p; @@ -8850,9 +8849,9 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf return NULL; } -static struct task_struct *__pick_next_task_fair(struct rq *rq) +static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_struct *prev) { - return pick_next_task_fair(rq, NULL, NULL); + return pick_next_task_fair(rq, prev, NULL); } static bool fair_server_has_tasks(struct sched_dl_entity *dl_se) @@ -13490,13 +13489,13 @@ DEFINE_SCHED_CLASS(fair) = { .wakeup_preempt = check_preempt_wakeup_fair, + .pick_task = pick_task_fair, .pick_next_task = __pick_next_task_fair, .put_prev_task = put_prev_task_fair, .set_next_task = set_next_task_fair, #ifdef CONFIG_SMP .balance = balance_fair, - .pick_task = pick_task_fair, .select_task_rq = select_task_rq_fair, .migrate_task_rq = migrate_task_rq_fair, diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 16074209ce26..a343e1c97f56 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -462,21 +462,10 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir next->se.exec_start = rq_clock_task(rq); } -#ifdef CONFIG_SMP -static struct task_struct *pick_task_idle(struct rq *rq) +struct task_struct *pick_task_idle(struct rq *rq) { return rq->idle; } -#endif - -struct task_struct *pick_next_task_idle(struct rq *rq) -{ - struct task_struct *next = rq->idle; - - set_next_task_idle(rq, next, true); - - return next; -} /* * It is not legal to sleep in the idle task - print a warning @@ -531,13 +520,12 @@ DEFINE_SCHED_CLASS(idle) = { .wakeup_preempt = wakeup_preempt_idle, - .pick_next_task = pick_next_task_idle, + .pick_task = pick_task_idle, .put_prev_task = put_prev_task_idle, .set_next_task = set_next_task_idle, #ifdef CONFIG_SMP .balance = balance_idle, - .pick_task = pick_task_idle, .select_task_rq = select_task_rq_idle, .set_cpus_allowed = set_cpus_allowed_common, #endif diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index fdc8e059cab0..8025f394b0e0 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1748,16 +1748,6 @@ static struct task_struct *pick_task_rt(struct rq *rq) return p; } -static struct task_struct *pick_next_task_rt(struct rq *rq) -{ - struct task_struct *p = pick_task_rt(rq); - - if (p) - set_next_task_rt(rq, p, true); - - return p; -} - static void put_prev_task_rt(struct rq *rq, struct task_struct *p) { struct sched_rt_entity *rt_se = &p->rt; @@ -2645,13 +2635,12 @@ DEFINE_SCHED_CLASS(rt) = { .wakeup_preempt = wakeup_preempt_rt, - .pick_next_task = pick_next_task_rt, + .pick_task = pick_task_rt, .put_prev_task = put_prev_task_rt, .set_next_task = set_next_task_rt, #ifdef CONFIG_SMP .balance = balance_rt, - .pick_task = pick_task_rt, .select_task_rq = select_task_rq_rt, .set_cpus_allowed = set_cpus_allowed_common, .rq_online = rq_online_rt, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 80605e2da483..64a4ed758ba1 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2300,7 +2300,17 @@ struct sched_class { void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags); - struct task_struct *(*pick_next_task)(struct rq *rq); + struct task_struct *(*pick_task)(struct rq *rq); + /* + * Optional! 
When implemented pick_next_task() should be equivalent to: + * + * next = pick_task(); + * if (next) { + * put_prev_task(prev); + * set_next_task_first(next); + * } + */ + struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev); void (*put_prev_task)(struct rq *rq, struct task_struct *p); void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); @@ -2309,8 +2319,6 @@ struct sched_class { int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags); - struct task_struct * (*pick_task)(struct rq *rq); - void (*migrate_task_rq)(struct task_struct *p, int new_cpu); void (*task_woken)(struct rq *this_rq, struct task_struct *task); @@ -2421,7 +2429,7 @@ static inline bool sched_fair_runnable(struct rq *rq) } extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); -extern struct task_struct *pick_next_task_idle(struct rq *rq); +extern struct task_struct *pick_task_idle(struct rq *rq); #define SCA_CHECK 0x01 #define SCA_MIGRATE_DISABLE 0x02 diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 4cf02074fa9e..0fd5352ff0ce 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -41,16 +41,6 @@ static struct task_struct *pick_task_stop(struct rq *rq) return rq->stop; } -static struct task_struct *pick_next_task_stop(struct rq *rq) -{ - struct task_struct *p = pick_task_stop(rq); - - if (p) - set_next_task_stop(rq, p, true); - - return p; -} - static void enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) { @@ -112,13 +102,12 @@ DEFINE_SCHED_CLASS(stop) = { .wakeup_preempt = wakeup_preempt_stop, - .pick_next_task = pick_next_task_stop, + .pick_task = pick_task_stop, .put_prev_task = put_prev_task_stop, .set_next_task = set_next_task_stop, #ifdef CONFIG_SMP .balance = balance_stop, - .pick_task = pick_task_stop, .select_task_rq = select_task_rq_stop, .set_cpus_allowed = set_cpus_allowed_common, #endif From 436f3eed5c69c1048a5754df6e3dbb291e5cccbd Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 14 Aug 2024 00:25:54 +0200 Subject: [PATCH 08/10] sched: Combine the last put_prev_task() and the first set_next_task() Ensure the last put_prev_task() and the first set_next_task() always go together. 
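The invariant is small enough to model in a few lines of userspace C
(assert() standing in for WARN_ON_ONCE(), toy types; modeled on the
helper added below):

    #include <assert.h>
    #include <stdio.h>

    struct task { const char *comm; };
    struct rq { struct task *curr; };

    static void put_prev_task(struct rq *rq, struct task *p)       { printf("put %s\n", p->comm); }
    static void set_next_task_first(struct rq *rq, struct task *p) { printf("set %s\n", p->comm); }

    /*
     * The last put and the first set always travel together, and
     * re-picking the same task is a no-op for both callbacks.
     */
    static void put_prev_set_next_task(struct rq *rq, struct task *prev,
                                       struct task *next)
    {
        assert(rq->curr == prev);       /* WARN_ON_ONCE() in the kernel */

        if (next == prev)
            return;

        put_prev_task(rq, prev);
        set_next_task_first(rq, next);
    }

    int main(void)
    {
        struct task a = { "a" }, b = { "b" };
        struct rq rq = { &a };

        put_prev_set_next_task(&rq, &a, &a);    /* same task: prints nothing */
        put_prev_set_next_task(&rq, &a, &b);    /* put a, then set b */
        return 0;
    }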
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20240813224016.158454756@infradead.org
---
 kernel/sched/core.c  | 17 ++++-------------
 kernel/sched/fair.c  |  3 +--
 kernel/sched/sched.h | 10 +++++++++-
 3 files changed, 14 insertions(+), 16 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b9429eb5dbbe..8a1cf93da203 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5894,8 +5894,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	/* Assume the next prioritized class is idle_sched_class */
 	if (!p) {
 		p = pick_task_idle(rq);
-		put_prev_task(rq, prev);
-		set_next_task_first(rq, p);
+		put_prev_set_next_task(rq, prev, p);
 	}
 
@@ -5926,8 +5925,7 @@
 		} else {
 			p = class->pick_task(rq);
 			if (p) {
-				put_prev_task(rq, prev);
-				set_next_task_first(rq, p);
+				put_prev_set_next_task(rq, prev, p);
 				return p;
 			}
 		}
@@ -6016,13 +6014,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
 
 		next = rq->core_pick;
-		if (next != prev) {
-			put_prev_task(rq, prev);
-			set_next_task_first(rq, next);
-		}
-
 		rq->core_pick = NULL;
-		goto out;
+		goto out_set_next;
 	}
@@ -6192,9 +6185,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	}
 
 out_set_next:
-	put_prev_task(rq, prev);
-	set_next_task_first(rq, next);
-out:
+	put_prev_set_next_task(rq, prev, next);
 	if (rq->core->core_forceidle_count && next == rq->idle)
 		queue_core_balance(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 53556b08edef..c5b7873dcc30 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8819,8 +8819,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
 simple:
 #endif
-	put_prev_task(rq, prev);
-	set_next_task_fair(rq, p, true);
+	put_prev_set_next_task(rq, prev, p);
 	return p;
 
 idle:
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 64a4ed758ba1..aae35818cca4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2370,8 +2370,16 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
 	next->sched_class->set_next_task(rq, next, false);
 }
 
-static inline void set_next_task_first(struct rq *rq, struct task_struct *next)
+static inline void put_prev_set_next_task(struct rq *rq,
+					  struct task_struct *prev,
+					  struct task_struct *next)
 {
+	WARN_ON_ONCE(rq->curr != prev);
+
+	if (next == prev)
+		return;
+
+	prev->sched_class->put_prev_task(rq, prev);
 	next->sched_class->set_next_task(rq, next, true);
 }

From bd9bbc96e8356886971317f57994247ca491dbf1 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 14 Aug 2024 00:25:55 +0200
Subject: [PATCH 09/10] sched: Rework dl_server

When a task is selected through a dl_server, it will have p->dl_server
set, such that it can account runtime to the dl_server, see
update_curr_task().

Currently p->dl_server is set in pick*task() whenever it goes through
the dl_server; clearing it is a bit of a mess though. The trivial
solution is clearing it on the final put (now that we have this
location).

However, this gives a problem when:

	p = pick_task(rq);
	if (p)
		put_prev_set_next_task(rq, prev, p);

picks the same task but through a different path, notably when it goes
from picking through the dl_server to a direct pick or vice-versa. In
that case we cannot readily determine whether we should clear or
preserve p->dl_server.
An additional complication is pick_*task() setting p->dl_server for a
remote pick; it might still need to update runtime before it schedules
the core_pick.

Close all these holes and remove all the random clearing of
p->dl_server by:

 - having pick_*task() manage rq->dl_server
 - having the final put_prev_task() clear p->dl_server
 - having the first set_next_task() set p->dl_server = rq->dl_server
 - having the core_sched code save/restore rq->dl_server where
   appropriate.

Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20240813224016.259853414@infradead.org
---
 kernel/sched/core.c     | 40 +++++++++++++++-------------------------
 kernel/sched/deadline.c |  2 +-
 kernel/sched/fair.c     | 10 ++--------
 kernel/sched/sched.h    | 14 ++++++++++++++
 4 files changed, 32 insertions(+), 34 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8a1cf93da203..ffcd637dc8e4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3668,8 +3668,6 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
 		rq->idle_stamp = 0;
 	}
 #endif
-
-	p->dl_server = NULL;
 }
 
 /*
@@ -5859,14 +5857,6 @@ static void prev_balance(struct rq *rq, struct task_struct *prev,
 			break;
 	}
 #endif
-
-	/*
-	 * We've updated @prev and no longer need the server link, clear it.
-	 * Must be done before ->pick_next_task() because that can (re)set
-	 * ->dl_server.
-	 */
-	if (prev->dl_server)
-		prev->dl_server = NULL;
 }
 
 /*
@@ -5878,6 +5868,8 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	const struct sched_class *class;
 	struct task_struct *p;
 
+	rq->dl_server = NULL;
+
 	/*
 	 * Optimization: we know that if all tasks are in the fair class we can
 	 * call that function directly, but only if the @prev task wasn't of a
@@ -5897,20 +5889,6 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 			put_prev_set_next_task(rq, prev, p);
 		}
 
-		/*
-		 * This is a normal CFS pick, but the previous could be a DL pick.
-		 * Clear it as previous is no longer picked.
-		 */
-		if (prev->dl_server)
-			prev->dl_server = NULL;
-
-		/*
-		 * This is the fast path; it cannot be a DL server pick;
-		 * therefore even if @p == @prev, ->dl_server must be NULL.
-		 */
-		if (p->dl_server)
-			p->dl_server = NULL;
-
 		return p;
 	}
 
@@ -5958,6 +5936,8 @@ static inline struct task_struct *pick_task(struct rq *rq)
 	const struct sched_class *class;
 	struct task_struct *p;
 
+	rq->dl_server = NULL;
+
 	for_each_class(class) {
 		p = class->pick_task(rq);
 		if (p)
@@ -5996,6 +5976,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	 * another cpu during offline.
 	 */
 	rq->core_pick = NULL;
+	rq->core_dl_server = NULL;
 	return __pick_next_task(rq, prev, rf);
 }
 
@@ -6014,7 +5995,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
 
 		next = rq->core_pick;
+		rq->dl_server = rq->core_dl_server;
 		rq->core_pick = NULL;
+		rq->core_dl_server = NULL;
 		goto out_set_next;
 	}
 
@@ -6059,6 +6042,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		next = pick_task(rq);
 		if (!next->core_cookie) {
 			rq->core_pick = NULL;
+			rq->core_dl_server = NULL;
 			/*
 			 * For robustness, update the min_vruntime_fi for
 			 * unconstrained picks as well.
@@ -6086,7 +6070,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (i != cpu && (rq_i != rq->core || !core_clock_updated)) update_rq_clock(rq_i); - p = rq_i->core_pick = pick_task(rq_i); + rq_i->core_pick = p = pick_task(rq_i); + rq_i->core_dl_server = rq_i->dl_server; + if (!max || prio_less(max, p, fi_before)) max = p; } @@ -6110,6 +6096,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } rq_i->core_pick = p; + rq_i->core_dl_server = NULL; if (p == rq_i->idle) { if (rq_i->nr_running) { @@ -6170,6 +6157,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (i == cpu) { rq_i->core_pick = NULL; + rq_i->core_dl_server = NULL; continue; } @@ -6178,6 +6166,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (rq_i->curr == rq_i->core_pick) { rq_i->core_pick = NULL; + rq_i->core_dl_server = NULL; continue; } @@ -8401,6 +8390,7 @@ void __init sched_init(void) #ifdef CONFIG_SCHED_CORE rq->core = rq; rq->core_pick = NULL; + rq->core_dl_server = NULL; rq->core_enabled = 0; rq->core_tree = RB_ROOT; rq->core_forceidle_count = 0; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index a1547e1cd96e..e83b68430627 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2423,7 +2423,7 @@ static struct task_struct *__pick_task_dl(struct rq *rq) update_curr_dl_se(rq, dl_se, 0); goto again; } - p->dl_server = dl_se; + rq->dl_server = dl_se; } else { p = dl_task_of(dl_se); } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c5b7873dcc30..f67311217063 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8749,14 +8749,6 @@ static struct task_struct *pick_task_fair(struct rq *rq) cfs_rq = group_cfs_rq(se); } while (cfs_rq); - /* - * This can be called from directly from CFS's ->pick_task() or indirectly - * from DL's ->pick_task when fair server is enabled. In the indirect case, - * DL will set ->dl_server just after this function is called, so its Ok to - * clear. In the direct case, we are picking directly so we must clear it. - */ - task_of(se)->dl_server = NULL; - return task_of(se); } @@ -8780,6 +8772,8 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf if (prev->sched_class != &fair_sched_class) goto simple; + __put_prev_set_next_dl_server(rq, prev, p); + /* * Because of the set_next_buddy() in dequeue_task_fair() it is rather * likely that a next task is from the same cgroup as the current. 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index aae35818cca4..2a216c9153e9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1066,6 +1066,7 @@ struct rq {
 	unsigned int		nr_uninterruptible;
 
 	struct task_struct __rcu	*curr;
+	struct sched_dl_entity	*dl_server;
 	struct task_struct	*idle;
 	struct task_struct	*stop;
 	unsigned long		next_balance;
@@ -1193,6 +1194,7 @@ struct rq {
 	/* per rq */
 	struct rq		*core;
 	struct task_struct	*core_pick;
+	struct sched_dl_entity	*core_dl_server;
 	unsigned int		core_enabled;
 	unsigned int		core_sched_seq;
 	struct rb_root		core_tree;
@@ -2370,12 +2372,24 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
 	next->sched_class->set_next_task(rq, next, false);
 }
 
+static inline void
+__put_prev_set_next_dl_server(struct rq *rq,
+			      struct task_struct *prev,
+			      struct task_struct *next)
+{
+	prev->dl_server = NULL;
+	next->dl_server = rq->dl_server;
+	rq->dl_server = NULL;
+}
+
 static inline void put_prev_set_next_task(struct rq *rq,
 					  struct task_struct *prev,
 					  struct task_struct *next)
 {
 	WARN_ON_ONCE(rq->curr != prev);
 
+	__put_prev_set_next_dl_server(rq, prev, next);
+
 	if (next == prev)
 		return;

From b2d70222dbf2a2ff7a972a685d249a5d75afa87f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 14 Aug 2024 00:25:56 +0200
Subject: [PATCH 10/10] sched: Add put_prev_task(.next)

In order to tell the previous sched_class what the next task is, add
put_prev_task(.next).

Notably, SCX will use this to:

1) determine whether the next task will leave the SCX sched class and
   push the current task to another CPU if possible.
2) gather statistics on how often and by which other classes it gets
   preempted.

Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20240813224016.367421076@infradead.org
---
 kernel/sched/deadline.c  | 2 +-
 kernel/sched/fair.c      | 2 +-
 kernel/sched/idle.c      | 2 +-
 kernel/sched/rt.c        | 2 +-
 kernel/sched/sched.h     | 6 +++---
 kernel/sched/stop_task.c | 2 +-
 6 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e83b68430627..9ce93d0bf452 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2436,7 +2436,7 @@ static struct task_struct *pick_task_dl(struct rq *rq)
 	return __pick_task_dl(rq);
 }
 
-static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
+static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
 {
 	struct sched_dl_entity *dl_se = &p->dl;
 	struct dl_rq *dl_rq = &rq->dl;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f67311217063..d697a0a3fc73 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8869,7 +8869,7 @@ void fair_server_init(struct rq *rq)
 /*
  * Account for a descheduled task:
  */
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct task_struct *next)
 {
 	struct sched_entity *se = &prev->se;
 	struct cfs_rq *cfs_rq;
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index a343e1c97f56..7a105a0123aa 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -450,7 +450,7 @@ static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
 		resched_curr(rq);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next)
 {
 	dl_server_update_idle_time(rq, prev);
 }
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index fdc8e059cab0..172c588de542 100644
--- 
a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1748,7 +1748,7 @@ static struct task_struct *pick_task_rt(struct rq *rq) return p; } -static void put_prev_task_rt(struct rq *rq, struct task_struct *p) +static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next) { struct sched_rt_entity *rt_se = &p->rt; struct rt_rq *rt_rq = &rq->rt; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 2a216c9153e9..3744f16a1293 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2314,7 +2314,7 @@ struct sched_class { */ struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev); - void (*put_prev_task)(struct rq *rq, struct task_struct *p); + void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next); void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); #ifdef CONFIG_SMP @@ -2364,7 +2364,7 @@ struct sched_class { static inline void put_prev_task(struct rq *rq, struct task_struct *prev) { WARN_ON_ONCE(rq->curr != prev); - prev->sched_class->put_prev_task(rq, prev); + prev->sched_class->put_prev_task(rq, prev, NULL); } static inline void set_next_task(struct rq *rq, struct task_struct *next) @@ -2393,7 +2393,7 @@ static inline void put_prev_set_next_task(struct rq *rq, if (next == prev) return; - prev->sched_class->put_prev_task(rq, prev); + prev->sched_class->put_prev_task(rq, prev, next); next->sched_class->set_next_task(rq, next, true); } diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 0fd5352ff0ce..058dd42e3d9b 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -59,7 +59,7 @@ static void yield_task_stop(struct rq *rq) BUG(); /* the stop task should never yield, its pointless. */ } -static void put_prev_task_stop(struct rq *rq, struct task_struct *prev) +static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct task_struct *next) { update_curr_common(rq); }
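For a sense of what a class can do with the new argument (a
hypothetical consumer in toy C; sched_ext's actual code is not part of
this series), here is a put_prev_task() that checks whether the next
pick leaves the class:

    #include <stdio.h>
    #include <stddef.h>

    struct rq;
    struct task;

    struct sched_class {
        const char *name;
        void (*put_prev_task)(struct rq *rq, struct task *p, struct task *next);
    };

    struct task { const char *comm; const struct sched_class *sched_class; };
    struct rq { int unused; };

    /*
     * Toy consumer of .next: when the incoming task belongs to another
     * class, this class knows it is vacating the CPU and could, as SCX
     * intends, push its current task elsewhere or bump a statistic.
     */
    static void toy_put_prev_task(struct rq *rq, struct task *p, struct task *next)
    {
        if (next && next->sched_class != p->sched_class)
            printf("%s: CPU goes to class %s\n",
                   p->comm, next->sched_class->name);
    }

    static const struct sched_class toy   = { "toy",   toy_put_prev_task };
    static const struct sched_class other = { "other", toy_put_prev_task };

    int main(void)
    {
        struct rq rq = { 0 };
        struct task prev = { "prev", &toy };
        struct task next = { "next", &other };

        /* the bare put_prev_task() wrapper above passes next == NULL */
        prev.sched_class->put_prev_task(&rq, &prev, NULL);
        /* put_prev_set_next_task() passes the actual pick */
        prev.sched_class->put_prev_task(&rq, &prev, &next);
        return 0;
    }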