scx_lavd: kick CPU explicitly at the ops.enqueue() path

When the current task is chosen to yield, we should explicitly call
scx_bpf_kick_cpu(_, SCX_KICK_PREEMPT). Setting the current task's time
slice to zero is not sufficient in this case because the sched_ext core
does not call resched_curr() at the ops.enqueue() path.

Signed-off-by: Changwoo Min <[email protected]>
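
As a minimal illustration of the point above (a sketch, not scx_lavd's actual
code): at the ops.enqueue() path, a scheduler that wants the running task to
yield has to zero its slice and kick the CPU with SCX_KICK_PREEMPT, because
zeroing the slice alone does not cause a reschedule there. The struct-ops name
sketch_enqueue and the helper should_yield() are hypothetical placeholders, and
the DSQ insertion of @p is elided.

#include <scx/common.bpf.h>

/*
 * Hypothetical stand-in for the scheduler's own "should the running
 * task yield to the newly enqueued one?" test.
 */
static bool should_yield(struct task_struct *curr)
{
	return false;	/* placeholder: compare priorities/deadlines here */
}

void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
{
	struct task_struct *curr = bpf_get_current_task_btf();
	s32 cpu = bpf_get_smp_processor_id();

	/* ... insert @p into a DSQ here (elided) ... */

	if (should_yield(curr)) {
		/*
		 * Give up the remaining slice and kick the CPU explicitly.
		 * At ops.enqueue() the sched_ext core does not call
		 * resched_curr(), so curr->scx.slice = 0 on its own would
		 * not preempt the running task.
		 */
		curr->scx.slice = 0;
		scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
	}
}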
Changwoo Min committed Oct 28, 2024
1 parent f56b79b commit 5b91a52
Showing 2 changed files with 28 additions and 23 deletions.
23 changes: 17 additions & 6 deletions scheds/rust/scx_lavd/src/bpf/main.bpf.c
@@ -1020,7 +1020,7 @@ void BPF_STRUCT_OPS(lavd_enqueue, struct task_struct *p, u64 enq_flags)
 	struct task_ctx *taskc;
 	s32 cpu_id;
 	u64 dsq_id;
-	bool preempted = false;
+	bool preempted = false, yield;
 
 	/*
 	 * Place a task to a run queue of current cpu's compute domain.
@@ -1089,8 +1089,11 @@ void BPF_STRUCT_OPS(lavd_enqueue, struct task_struct *p, u64 enq_flags)
 		p_run = bpf_get_current_task_btf();
 		taskc_run = try_get_task_ctx(p_run);
 
-		if (taskc_run && !is_eligible(taskc_run))
-			try_yield_current_cpu(p_run, cpuc_cur, taskc_run);
+		if (taskc_run && !is_eligible(taskc_run)) {
+			yield = try_yield_current_cpu(p_run, cpuc_cur, taskc_run);
+			if (yield)
+				try_kick_cpu(cpuc_cur, cpuc_cur->last_kick_clk);
+		}
 	}
 }
 
@@ -1376,7 +1379,7 @@ void BPF_STRUCT_OPS(lavd_tick, struct task_struct *p_run)
 	 * If a task is eligible, don't consider its being preempted.
 	 */
 	if (is_eligible(p_run))
-		return;
+		goto update_cpuperf;
 
 	/*
 	 * Try to yield the current CPU if there is a higher priority task in
@@ -1385,16 +1388,24 @@ void BPF_STRUCT_OPS(lavd_tick, struct task_struct *p_run)
 	cpuc_run = get_cpu_ctx();
 	taskc_run = get_task_ctx(p_run);
 	if (!cpuc_run || !taskc_run)
-		return;
+		goto update_cpuperf;
 
 	preempted = try_yield_current_cpu(p_run, cpuc_run, taskc_run);
 
+	/*
+	 * If decided to yield, give up its time slice.
+	 */
+	if (preempted) {
+		p_run->scx.slice = 0;
+	}
 	/*
 	 * Update performance target of the current CPU if the current running
 	 * task continues to run.
 	 */
-	if (!preempted)
+	else {
+update_cpuperf:
 		try_decrease_cpuperf_target(cpuc_run);
+	}
 }
 
 void BPF_STRUCT_OPS(lavd_runnable, struct task_struct *p, u64 enq_flags)
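
For the tick path, the net effect of the hunks above can be summarized with the
following sketch. Helper and type names are taken from the diff; unlike the real
code, which keeps the update_cpuperf goto label, this sketch fetches the CPU and
task contexts up front so every branch has a valid cpuc_run, and all other
details are elided.

void BPF_STRUCT_OPS(sketch_tick, struct task_struct *p_run)
{
	struct cpu_ctx *cpuc_run = get_cpu_ctx();
	struct task_ctx *taskc_run = get_task_ctx(p_run);
	bool yielded = false;

	if (!cpuc_run || !taskc_run)
		return;

	/* Only an ineligible task is a candidate for being preempted. */
	if (!is_eligible(p_run))
		yielded = try_yield_current_cpu(p_run, cpuc_run, taskc_run);

	if (yielded) {
		/*
		 * Giving up the slice is enough here: the tick path does
		 * trigger a reschedule once the slice is exhausted.
		 */
		p_run->scx.slice = 0;
	} else {
		/* The task keeps running; lower the CPU's performance target. */
		try_decrease_cpuperf_target(cpuc_run);
	}
}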
28 changes: 11 additions & 17 deletions scheds/rust/scx_lavd/src/bpf/preempt.bpf.c
@@ -215,29 +215,20 @@ static struct cpu_ctx *find_victim_cpu(const struct cpumask *cpumask,
 static bool try_kick_cpu(struct cpu_ctx *victim_cpuc, u64 victim_last_kick_clk)
 {
 	/*
-	 * If the current CPU is a victim, we just reset the current task's
-	 * time slice as an optimization. Othewise, kick the remote CPU for
-	 * preemption.
+	 * Kick a victim CPU if it is not victimized yet by another
+	 * concurrent kick task.
 	 *
 	 * Kicking the victim CPU does _not_ guarantee that task @p will run on
 	 * that CPU. Enqueuing @p to the global queue is one operation, and
 	 * kicking the victim is another asynchronous operation. However, it is
 	 * okay because, anyway, the victim CPU will run a higher-priority task
 	 * than @p.
 	 */
-	if (bpf_get_smp_processor_id() == victim_cpuc->cpu_id) {
-		struct task_struct *tsk = bpf_get_current_task_btf();
-		tsk->scx.slice = 0;
-		return true;
-	}
+	bool ret;
 
-	/*
-	 * Kick the remote victim CPU if it is not victimized yet by another
-	 * concurrent kick task.
-	 */
-	bool ret = __sync_bool_compare_and_swap(&victim_cpuc->last_kick_clk,
-						victim_last_kick_clk,
-						bpf_ktime_get_ns());
+	ret = __sync_bool_compare_and_swap(&victim_cpuc->last_kick_clk,
+					   victim_last_kick_clk,
+					   bpf_ktime_get_ns());
 	if (ret)
 		scx_bpf_kick_cpu(victim_cpuc->cpu_id, SCX_KICK_PREEMPT);
 
@@ -329,8 +320,6 @@ static bool try_yield_current_cpu(struct task_struct *p_run,
 		ret = __sync_bool_compare_and_swap(
 			&taskc_wait->victim_cpu,
 			(s32)LAVD_CPU_ID_NONE, cpu_id);
-		if (ret)
-			ret = try_kick_cpu(cpuc_run, cpuc_run->last_kick_clk);
 	}
 
 	/*
@@ -340,6 +329,11 @@ static bool try_yield_current_cpu(struct task_struct *p_run,
 	}
 	bpf_rcu_read_unlock();
 
+	/*
+	 * If decided to yield (ret == true), the caller should give up
+	 * its time slice (at the ops.tick() path) or explicitly kick the
+	 * victim CPU (at the ops.enqueue() path).
+	 */
 	return ret;
 }
 
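
To spell out what the compare-and-swap in try_kick_cpu() buys: when several CPUs
decide to preempt the same victim concurrently, only the one that wins the CAS
on last_kick_clk sends the IPI; the losers back off. Below is a reduced sketch
of that guard, keeping only the two cpu_ctx fields it touches (struct
cpu_ctx_sketch and kick_once() are illustrative names, not scx_lavd's).

struct cpu_ctx_sketch {
	s32	cpu_id;		/* victim CPU id */
	u64	last_kick_clk;	/* time of the last successful kick */
};

static bool kick_once(struct cpu_ctx_sketch *victim, u64 observed_kick_clk)
{
	/*
	 * Succeed only if nobody has kicked the victim since we read
	 * observed_kick_clk; a losing racer returns false and does nothing.
	 */
	if (!__sync_bool_compare_and_swap(&victim->last_kick_clk,
					  observed_kick_clk,
					  bpf_ktime_get_ns()))
		return false;

	scx_bpf_kick_cpu(victim->cpu_id, SCX_KICK_PREEMPT);
	return true;
}

Note that try_yield_current_cpu() itself no longer kicks anyone after this
change: the ops.tick() caller zeroes the slice, and the ops.enqueue() caller
invokes try_kick_cpu() explicitly, as the hunks above show.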
