@@ -673,6 +673,12 @@ static void update_stat_for_stopping(struct task_struct *p,
673673 if (READ_ONCE (cur_svc_time ) < taskc -> svc_time )
674674 WRITE_ONCE (cur_svc_time , taskc -> svc_time );
675675
676+ /*
677+ * Increase total scaled CPU time of this CPU,
678+ * which is capacity and frequency invariant.
679+ */
680+ cpuc -> tot_sc_time += scale_cap_freq (task_runtime , cpuc -> cpu_id );
681+
676682 /*
677683 * Reset task's lock and futex boost count
678684 * for a lock holder to be boosted only once.
@@ -1317,20 +1323,52 @@ void BPF_STRUCT_OPS(lavd_set_cpumask, struct task_struct *p,
13171323 set_on_core_type (taskc , cpumask );
13181324}
13191325
1320- void BPF_STRUCT_OPS (lavd_cpu_release , s32 cpu ,
1321- struct scx_cpu_release_args * args )
1326+ void BPF_STRUCT_OPS (lavd_cpu_acquire , s32 cpu ,
1327+ struct scx_cpu_acquire_args * args )
13221328{
13231329 struct cpu_ctx * cpuc ;
1330+ u64 dur , scaled_dur ;
1331+
1332+ cpuc = get_cpu_ctx_id (cpu );
1333+ if (!cpuc ) {
1334+ scx_bpf_error ("Failed to lookup cpu_ctx %d" , cpu );
1335+ return ;
1336+ }
13241337
13251338 /*
1326- * When a CPU is released to serve higher priority scheduler class,
1327- * reset the CPU's preemption information so it cannot be a victim.
1339+ * When regaining control of a CPU under the higher priority scheduler
1340+ * class, measure how much time the higher priority scheduler class
1341+ * used -- i.e., [lavd_cpu_release, lavd_cpu_acquire]. This will be
1342+ * used to calculate capacity-invariant and frequency-invariant CPU
1343+ * utilization.
1344+ */
1345+ dur = time_delta (scx_bpf_now (), cpuc -> cpu_release_clk );
1346+ scaled_dur = scale_cap_freq (dur , cpu );
1347+ cpuc -> tot_sc_time += scaled_dur ;
1348+
1349+ /*
1350+ * The higher-priority scheduler class could change the CPU frequency,
1351+ * so let's keep track of the frequency when we gain the CPU control.
1352+ * This helps to make the frequency update decision.
13281353 */
1354+ cpuc -> cpuperf_cur = scx_bpf_cpuperf_cur (cpu );
1355+ }
1356+
1357+ void BPF_STRUCT_OPS (lavd_cpu_release , s32 cpu ,
1358+ struct scx_cpu_release_args * args )
1359+ {
1360+ struct cpu_ctx * cpuc ;
1361+
13291362 cpuc = get_cpu_ctx_id (cpu );
13301363 if (!cpuc ) {
13311364 scx_bpf_error ("Failed to lookup cpu_ctx %d" , cpu );
13321365 return ;
13331366 }
1367+
1368+ /*
1369+ * When a CPU is released to serve higher priority scheduler class,
1370+ * reset the CPU's preemption information so it cannot be a victim.
1371+ */
13341372 reset_cpu_preemption_info (cpuc , true);
13351373
13361374 /*
@@ -1343,6 +1381,13 @@ void BPF_STRUCT_OPS(lavd_cpu_release, s32 cpu,
13431381 * the target properly after regaining the control.
13441382 */
13451383 reset_cpuperf_target (cpuc );
1384+
1385+ /*
1386+ * Keep track of when the higher-priority scheduler class takes
1387+ * the CPUto calculate capacity-invariant and frequency-invariant
1388+ * CPU utilization.
1389+ */
1390+ cpuc -> cpu_release_clk = scx_bpf_now ();
13461391}
13471392
13481393void BPF_STRUCT_OPS (lavd_enable , struct task_struct * p )
@@ -1566,6 +1611,7 @@ static s32 init_per_cpu_ctx(u64 now)
15661611 cpuc -> stopping_tm_est_ns = SCX_SLICE_INF ;
15671612 cpuc -> online_clk = now ;
15681613 cpuc -> offline_clk = now ;
1614+ cpuc -> cpu_release_clk = now ;
15691615 cpuc -> is_online = bpf_cpumask_test_cpu (cpu , online_cpumask );
15701616 cpuc -> capacity = get_cpuperf_cap (cpu );
15711617 cpuc -> cpdom_poll_pos = cpu % LAVD_CPDOM_MAX_NR ;
@@ -1745,6 +1791,7 @@ SCX_OPS_DEFINE(lavd_ops,
17451791 .cpu_offline = (void * )lavd_cpu_offline ,
17461792 .update_idle = (void * )lavd_update_idle ,
17471793 .set_cpumask = (void * )lavd_set_cpumask ,
1794+ .cpu_acquire = (void * )lavd_cpu_acquire ,
17481795 .cpu_release = (void * )lavd_cpu_release ,
17491796 .enable = (void * )lavd_enable ,
17501797 .init_task = (void * )lavd_init_task ,
0 commit comments