Skip to content

Commit 08dbe60

Browse files
committed
scx_p2dq: Add backwards sticky scheduling
Backwards sticky scheduling ensures the task thrashes all CPUs like a bull in a china shop before it matures and settles down to realize its true potential by staying sticky to its roots. Or some BS like that... Signed-off-by: Daniel Hodges <[email protected]>
1 parent 05420aa commit 08dbe60

File tree

3 files changed

+22
-0
lines changed

3 files changed

+22
-0
lines changed

scheds/rust/scx_p2dq/src/bpf/main.bpf.c

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,7 @@ const volatile u32 interactive_ratio = 10;
6060
const volatile u32 min_nr_queued_pick2 = 10;
6161

6262
const volatile bool autoslice = true;
63+
const volatile bool backwards_sticky = false;
6364
const volatile bool dispatch_pick2_disable = false;
6465
const volatile bool eager_load_balance = true;
6566
const volatile bool interactive_sticky = false;
@@ -434,6 +435,19 @@ static s32 pick_idle_cpu(struct task_struct *p, task_ctx *taskc,
434435
goto found_cpu;
435436
}
436437

438+
if (backwards_sticky &&
439+
((taskc->bs_mask & nr_cpus) != nr_cpus)) {
440+
s32 i;
441+
bpf_for(i, 0, nr_cpus)
442+
if (taskc->bs_mask == 0 || (taskc->bs_mask & i) != i) {
443+
if (i == nr_cpus - 1)
444+
taskc->bs_mask &= nr_cpus;
445+
cpu = i;
446+
*is_idle = scx_bpf_test_and_clear_cpu_idle(i);
447+
goto found_cpu;
448+
}
449+
}
450+
437451
// First check if last CPU is idle
438452
if (taskc->all_cpus &&
439453
bpf_cpumask_test_cpu(prev_cpu, (smt_enabled && !interactive) ?
@@ -770,6 +784,8 @@ static __always_inline int p2dq_running_impl(struct task_struct *p)
770784
if (taskc->node_id != cpuc->node_id) {
771785
stat_inc(P2DQ_STAT_NODE_MIGRATION);
772786
}
787+
if (backwards_sticky)
788+
taskc->bs_mask |= task_cpu;
773789

774790
taskc->llc_id = llcx->id;
775791
taskc->node_id = llcx->node_id;

scheds/rust/scx_p2dq/src/bpf/types.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,7 @@ struct task_p2dq {
5858
u64 last_run_at;
5959
u64 llc_runs; /* how many runs on the current LLC */
6060
int last_dsq_index;
61+
s32 bs_mask;
6162

6263
/* The task is a workqueue worker thread */
6364
bool is_kworker;

scheds/rust/scx_p2dq/src/lib.rs

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,10 @@ pub struct SchedulerOpts {
4545
#[clap(short = 'e', long, help="DEPRECATED", action = clap::ArgAction::SetTrue)]
4646
pub eager_load_balance: bool,
4747

48+
/// Enables backward sticky scheduling.
49+
#[clap(long= "bs", action = clap::ArgAction::SetTrue)]
50+
pub backwards_sticky: bool,
51+
4852
/// Enables CPU frequency control.
4953
#[clap(short = 'f', long, action = clap::ArgAction::SetTrue)]
5054
pub freq_control: bool,
@@ -207,6 +211,7 @@ macro_rules! init_open_skel {
207211
$skel.maps.rodata_data.lb_slack_factor = opts.lb_slack_factor;
208212

209213
$skel.maps.rodata_data.autoslice = opts.autoslice;
214+
$skel.maps.rodata_data.backwards_sticky = opts.backwards_sticky;
210215
$skel.maps.rodata_data.debug = verbose as u32;
211216
$skel.maps.rodata_data.dispatch_pick2_disable = opts.dispatch_pick2_disable;
212217
$skel.maps.rodata_data.dispatch_lb_busy = opts.dispatch_lb_busy;

0 commit comments

Comments
 (0)