@@ -50,6 +50,8 @@ const volatile u32 cpu_freq_max = SCX_CPUPERF_ONE;
 const volatile u32 degradation_freq_frac32 = 1;
 const volatile u64 degradation_frac7 = 0;
 
+const volatile u32 backwards_sticky_freq_frac32 = 1;
+
 #define MIN(x, y) ((x) < (y) ? (x) : (y))
 #define MAX(x, y) ((x) > (y) ? (x) : (y))
 
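Like the existing degradation_freq_frac32 knob, the new backwards_sticky_freq_frac32 appears to encode a per-task probability as a fraction of 2^32 that gets compared against a random 32-bit roll in choose_chaos(); the default of 1 is effectively "almost never" until userspace overrides it. A minimal illustrative sketch of that convention (the helper names and the userspace conversion are assumptions, not part of this commit):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical: a frac32 knob "hits" with probability freq_frac32 / 2^32. */
static bool frac32_roll_hits(uint32_t roll, uint32_t freq_frac32)
{
	return roll < freq_frac32;	/* 0 never hits, UINT32_MAX nearly always */
}

/* Hypothetical userspace-side conversion from a probability in [0, 1). */
static uint32_t prob_to_frac32(double p)
{
	return (uint32_t)(p * 4294967296.0);	/* p * 2^32 */
}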
@@ -77,11 +79,21 @@ struct chaos_task_ctx *lookup_create_chaos_task_ctx(struct task_struct *p)
 	return bpf_task_storage_get(&chaos_task_ctxs, p, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
 }
 
+static __always_inline bool keep_backwards_sticky(struct chaos_task_ctx *taskc)
+{
+	return taskc->next_trait == CHAOS_TRAIT_BACKWARDS_STICKY &&
+	       taskc->bs_cpumask &&
+	       bpf_cpumask_weight(cast_mask(taskc->bs_cpumask)) != nr_cpus;
+}
+
 static __always_inline enum chaos_trait_kind choose_chaos(struct chaos_task_ctx *taskc)
 {
 	if (taskc->match & CHAOS_MATCH_EXCLUDED)
 		return CHAOS_TRAIT_NONE;
 
+	if (keep_backwards_sticky(taskc))
+		return -1;
+
 	u32 roll = bpf_get_prandom_u32();
 
 #pragma unroll
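The early return keeps a task pinned to the backwards-sticky trait until its bs_cpumask has covered every CPU; only then does a fresh roll get to pick a different trait. A hypothetical call-site reading of the sentinel (the caller is not shown in this diff, so the negative-return convention is an assumption):

	enum chaos_trait_kind trait = choose_chaos(taskc);
	if ((s32)trait >= 0)
		taskc->next_trait = trait;	/* negative: keep the current trait */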
@@ -113,6 +125,23 @@ static __always_inline u64 get_cpu_delay_dsq(int cpu_idx)
 	return CHAOS_DSQ_BASE | cpu_idx;
 }
 
+static __always_inline s32 backwards_sticky_cpu(struct chaos_task_ctx *taskc)
+{
+	s32 cpu;
+	if (!taskc->bs_cpumask || bpf_cpumask_weight(cast_mask(taskc->bs_cpumask)) == nr_cpus)
+		return -ENOENT;
+
+	if (!taskc->bs_cpumask)
+		return -ENOENT;
+
+	cpu = bpf_cpumask_first_zero(cast_mask(taskc->bs_cpumask));
+	if (!taskc->bs_cpumask)
+		return -ENOENT;
+	bpf_cpumask_set_cpu(cpu, taskc->bs_cpumask);
+
+	return cpu;
+}
+
 static __always_inline s32 calculate_chaos_match(struct task_struct *p)
 {
 	struct chaos_task_ctx *taskc;
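Taking the first clear bit and then setting it means backwards_sticky_cpu() hands out each CPU exactly once before the mask fills and the function starts returning -ENOENT; the repeated NULL checks on bs_cpumask are presumably there to satisfy the BPF verifier, which treats each fresh load of the kptr as possibly NULL. A small plain-C model of that walk, outside the BPF program and assuming an 8-CPU machine, just to show the invariant:

#include <assert.h>
#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
	unsigned int mask = 0;			/* bit i set => CPU i already used */

	for (int step = 0; step < NR_CPUS; step++) {
		int cpu = __builtin_ctz(~mask);	/* lowest clear bit, like bpf_cpumask_first_zero() */
		assert(!(mask & (1u << cpu)));	/* never hands out the same CPU twice */
		mask |= 1u << cpu;		/* like bpf_cpumask_set_cpu() */
		printf("step %d -> cpu %d\n", step, cpu);
	}
	assert(mask == (1u << NR_CPUS) - 1);	/* every CPU visited exactly once */
	return 0;
}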
@@ -259,6 +288,15 @@ __weak s32 enqueue_chaotic(struct task_struct *p __arg_trusted, u64 enq_flags,
 		out = enqueue_random_delay(p, enq_flags, taskc);
 		break;
 
+	case CHAOS_TRAIT_BACKWARDS_STICKY:
+		out = false;
+		if (taskc->bs_cpumask &&
+		    bpf_cpumask_weight(cast_mask(taskc->bs_cpumask)) == nr_cpus) {
+			if (taskc->bs_cpumask)
+				bpf_cpumask_clear(taskc->bs_cpumask);
+			taskc->next_trait = CHAOS_TRAIT_NONE;
+		}
+		return out;
 	case CHAOS_TRAIT_NONE:
 	case CHAOS_TRAIT_CPU_FREQ:
 	case CHAOS_TRAIT_DEGRADATION:
@@ -474,6 +512,27 @@ void BPF_STRUCT_OPS(chaos_enqueue, struct task_struct *p __arg_trusted, u64 enq_
 		}
 	}
 
+	// Backwards sticky means the task will cycle through all CPUs before
+	// becoming sticky again. Do a direct dispatch to ensure the task lands
+	// on the correct CPU and thrashes the caches.
+	if (taskc->next_trait == CHAOS_TRAIT_BACKWARDS_STICKY) {
+		s32 cpu = backwards_sticky_cpu(taskc);
+		if (cpu >= 0) {
+			u64 slice_ns = 0;
+			if (promise.kind == P2DQ_ENQUEUE_PROMISE_VTIME)
+				slice_ns = promise.vtime.slice_ns;
+			if (promise.kind == P2DQ_ENQUEUE_PROMISE_FIFO)
+				slice_ns = promise.fifo.slice_ns;
+
+			scx_bpf_dsq_insert(p,
+					   SCX_DSQ_LOCAL_ON | cpu,
+					   slice_ns,
+					   enq_flags);
+			promise.kind = P2DQ_ENQUEUE_PROMISE_COMPLETE;
+			return;
+		}
+	}
+
 	complete_p2dq_enqueue(&promise, p);
 }
 
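The slice extraction above could also be factored into a helper. A hedged sketch, assuming the promise type is named struct enqueue_promise (its real name is not visible in this diff) and relying on scx_bpf_dsq_insert() treating a slice of 0 as "keep the task's current slice":

static __always_inline u64 promise_slice_ns(const struct enqueue_promise *promise)
{
	switch (promise->kind) {
	case P2DQ_ENQUEUE_PROMISE_VTIME:
		return promise->vtime.slice_ns;
	case P2DQ_ENQUEUE_PROMISE_FIFO:
		return promise->fifo.slice_ns;
	default:
		return 0;	/* slice 0: sched_ext keeps the task's current slice */
	}
}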
@@ -532,6 +591,26 @@ s32 BPF_STRUCT_OPS(chaos_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wa
 s32 BPF_STRUCT_OPS_SLEEPABLE(chaos_init_task, struct task_struct *p,
 			     struct scx_init_task_args *args)
 {
+	struct bpf_cpumask *bs_cpumask;
+	struct chaos_task_ctx *taskc;
+
+	if (!(taskc = lookup_create_chaos_task_ctx(p))) {
+		scx_bpf_error("failed to lookup task context in init_task");
+		return -ENOMEM;
+	}
+
+	if (!(bs_cpumask = bpf_cpumask_create())) {
+		scx_bpf_error("chaos_task_ctx bs_cpumask allocation failure");
+		return -ENOMEM;
+	}
+
+	if ((bs_cpumask = bpf_kptr_xchg(&taskc->bs_cpumask, bs_cpumask))) {
+		bpf_cpumask_release(bs_cpumask);
+		scx_bpf_error("chaos_task_ctx bs_cpumask already set");
+		return -EINVAL;
+	}
+	bpf_cpumask_clear(taskc->bs_cpumask);
+
 	s32 ret = p2dq_init_task_impl(p, args);
 	if (ret)
 		return ret;