
Commit 631919f

Merge tag 'sched_ext-for-7.0-rc6-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext
Pull sched_ext fixes from Tejun Heo:
 "These are late but both fix subtle yet critical problems and the blast
  radius is limited strictly to sched_ext.

   - Fix stale direct dispatch state in ddsp_dsq_id which can cause
     spurious warnings in mark_direct_dispatch() on task wakeup

   - Fix is_bpf_migration_disabled() false negative on non-PREEMPT_RCU
     configs which can lead to incorrectly dispatching migration-disabled
     tasks to remote CPUs"

* tag 'sched_ext-for-7.0-rc6-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext:
  sched_ext: Fix stale direct dispatch state in ddsp_dsq_id
  sched_ext: Fix is_bpf_migration_disabled() false negative on non-PREEMPT_RCU
2 parents e41255c + 7e0ffb7 commit 631919f

2 files changed: 54 additions & 26 deletions
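
For context on where the stale state in the first fix comes from: a BPF scheduler issues its direct dispatch verdict from ops.select_cpu() or ops.enqueue() by calling scx_bpf_dsq_insert(), and the core records the target DSQ and flags in p->scx.ddsp_dsq_id / p->scx.ddsp_enq_flags (via mark_direct_dispatch()) until the enqueue path consumes them. The sketch below is a minimal scheduler modeled on the in-tree tools/sched_ext/scx_simple.bpf.c example; it is not part of this commit and assumes the usual scx/common.bpf.h helpers:

/* SPDX-License-Identifier: GPL-2.0 */
/* Minimal sched_ext BPF scheduler, modeled on scx_simple (illustrative only). */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

s32 BPF_STRUCT_OPS(demo_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
{
	bool is_idle = false;
	s32 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);

	if (is_idle)
		/*
		 * Direct dispatch: the kernel stashes SCX_DSQ_LOCAL in
		 * p->scx.ddsp_dsq_id here and consumes it on enqueue. A stale
		 * value left over from an earlier, unconsumed verdict is what
		 * triggers the spurious warning in mark_direct_dispatch() on a
		 * later wakeup.
		 */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
	return cpu;
}

void BPF_STRUCT_OPS(demo_enqueue, struct task_struct *p, u64 enq_flags)
{
	/* Fallback: queue on the global DSQ with the default slice. */
	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}

SEC(".struct_ops.link")
struct sched_ext_ops demo_ops = {
	.select_cpu	= (void *)demo_select_cpu,
	.enqueue	= (void *)demo_enqueue,
	.name		= "ddsp_demo",
};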


kernel/sched/ext.c

Lines changed: 35 additions & 14 deletions
@@ -1109,15 +1109,6 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
 	dsq_mod_nr(dsq, 1);
 	p->scx.dsq = dsq;
 
-	/*
-	 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
-	 * direct dispatch path, but we clear them here because the direct
-	 * dispatch verdict may be overridden on the enqueue path during e.g.
-	 * bypass.
-	 */
-	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
-	p->scx.ddsp_enq_flags = 0;
-
 	/*
 	 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
 	 * match waiters' load_acquire.
@@ -1283,12 +1274,34 @@ static void mark_direct_dispatch(struct scx_sched *sch,
 	p->scx.ddsp_enq_flags = enq_flags;
 }
 
+/*
+ * Clear @p direct dispatch state when leaving the scheduler.
+ *
+ * Direct dispatch state must be cleared in the following cases:
+ * - direct_dispatch(): cleared on the synchronous enqueue path, deferred
+ *   dispatch keeps the state until consumed
+ * - process_ddsp_deferred_locals(): cleared after consuming deferred state,
+ * - do_enqueue_task(): cleared on enqueue fallbacks where the dispatch
+ *   verdict is ignored (local/global/bypass)
+ * - dequeue_task_scx(): cleared after dispatch_dequeue(), covering deferred
+ *   cancellation and holding_cpu races
+ * - scx_disable_task(): cleared for queued wakeup tasks, which are excluded by
+ *   the scx_bypass() loop, so that stale state is not reused by a subsequent
+ *   scheduler instance
+ */
+static inline void clear_direct_dispatch(struct task_struct *p)
+{
+	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
+	p->scx.ddsp_enq_flags = 0;
+}
+
 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
 			    u64 enq_flags)
 {
 	struct rq *rq = task_rq(p);
 	struct scx_dispatch_q *dsq =
 		find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
+	u64 ddsp_enq_flags;
 
 	touch_core_sched_dispatch(rq, p);
 
@@ -1329,8 +1342,10 @@ static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
 		return;
 	}
 
-	dispatch_enqueue(sch, dsq, p,
-			 p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
+	ddsp_enq_flags = p->scx.ddsp_enq_flags;
+	clear_direct_dispatch(p);
+
+	dispatch_enqueue(sch, dsq, p, ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
 }
 
 static bool scx_rq_online(struct rq *rq)
@@ -1439,6 +1454,7 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
 	 */
 	touch_core_sched(rq, p);
 	refill_task_slice_dfl(sch, p);
+	clear_direct_dispatch(p);
 	dispatch_enqueue(sch, dsq, p, enq_flags);
 }
 
@@ -1610,6 +1626,7 @@ static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags
 	sub_nr_running(rq, 1);
 
 	dispatch_dequeue(rq, p);
+	clear_direct_dispatch(p);
 	return true;
 }
 
@@ -2293,13 +2310,15 @@ static void process_ddsp_deferred_locals(struct rq *rq)
 					struct task_struct, scx.dsq_list.node))) {
 		struct scx_sched *sch = scx_root;
 		struct scx_dispatch_q *dsq;
+		u64 dsq_id = p->scx.ddsp_dsq_id;
+		u64 enq_flags = p->scx.ddsp_enq_flags;
 
 		list_del_init(&p->scx.dsq_list.node);
+		clear_direct_dispatch(p);
 
-		dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
+		dsq = find_dsq_for_dispatch(sch, rq, dsq_id, p);
 		if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
-			dispatch_to_local_dsq(sch, rq, dsq, p,
-					      p->scx.ddsp_enq_flags);
+			dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
 	}
 }
 
@@ -3015,6 +3034,8 @@ static void scx_disable_task(struct task_struct *p)
 	lockdep_assert_rq_held(rq);
 	WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
 
+	clear_direct_dispatch(p);
+
 	if (SCX_HAS_OP(sch, disable))
 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p);
 	scx_set_task_state(p, SCX_TASK_READY);

kernel/sched/ext_idle.c

Lines changed: 19 additions & 12 deletions
@@ -860,25 +860,32 @@ static bool check_builtin_idle_enabled(struct scx_sched *sch)
  * code.
  *
  * We can't simply check whether @p->migration_disabled is set in a
- * sched_ext callback, because migration is always disabled for the current
- * task while running BPF code.
+ * sched_ext callback, because the BPF prolog (__bpf_prog_enter) may disable
+ * migration for the current task while running BPF code.
  *
- * The prolog (__bpf_prog_enter) and epilog (__bpf_prog_exit) respectively
- * disable and re-enable migration. For this reason, the current task
- * inside a sched_ext callback is always a migration-disabled task.
+ * Since the BPF prolog calls migrate_disable() only when CONFIG_PREEMPT_RCU
+ * is enabled (via rcu_read_lock_dont_migrate()), migration_disabled == 1 for
+ * the current task is ambiguous only in that case: it could be from the BPF
+ * prolog rather than a real migrate_disable() call.
  *
- * Therefore, when @p->migration_disabled == 1, check whether @p is the
- * current task or not: if it is, then migration was not disabled before
- * entering the callback, otherwise migration was disabled.
+ * Without CONFIG_PREEMPT_RCU, the BPF prolog never calls migrate_disable(),
+ * so migration_disabled == 1 always means the task is truly
+ * migration-disabled.
+ *
+ * Therefore, when migration_disabled == 1 and CONFIG_PREEMPT_RCU is enabled,
+ * check whether @p is the current task or not: if it is, then migration was
+ * not disabled before entering the callback, otherwise migration was disabled.
  *
  * Returns true if @p is migration-disabled, false otherwise.
  */
 static bool is_bpf_migration_disabled(const struct task_struct *p)
 {
-	if (p->migration_disabled == 1)
-		return p != current;
-	else
-		return p->migration_disabled;
+	if (p->migration_disabled == 1) {
+		if (IS_ENABLED(CONFIG_PREEMPT_RCU))
+			return p != current;
+		return true;
+	}
+	return p->migration_disabled;
 }
 
 static s32 select_cpu_from_kfunc(struct scx_sched *sch, struct task_struct *p,
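
The behavioral change of the second fix can be seen in a small userspace model. This is a sketch only: old_check(), new_check(), current_task, and the preempt_rcu flag are hypothetical stand-ins for the kernel's is_bpf_migration_disabled(), the current task pointer, and IS_ENABLED(CONFIG_PREEMPT_RCU); the struct models only the migration_disabled counter the check reads.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in task: only the field the check inspects. */
struct task { int migration_disabled; };

static struct task self = { .migration_disabled = 1 };	/* one real migrate_disable() */
static struct task *current_task = &self;		/* @p is the current task */

/* Old check: assumes the BPF prolog always accounts for one disable. */
static bool old_check(const struct task *p)
{
	if (p->migration_disabled == 1)
		return p != current_task;
	return p->migration_disabled;
}

/* New check: only discount the prolog's disable when PREEMPT_RCU would add it. */
static bool new_check(const struct task *p, bool preempt_rcu)
{
	if (p->migration_disabled == 1) {
		if (preempt_rcu)
			return p != current_task;
		return true;
	}
	return p->migration_disabled;
}

int main(void)
{
	/*
	 * Without CONFIG_PREEMPT_RCU the BPF prolog never disables migration,
	 * so a count of 1 on the current task is a genuine migrate_disable().
	 * The old check reported false (the false negative); the new one
	 * reports true.
	 */
	printf("old: %d new: %d\n", old_check(&self), new_check(&self, false));
	return 0;
}

With preempt_rcu set to true the two checks agree, which is why the change only affects non-PREEMPT_RCU configurations.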
