Skip to content

Commit

Permalink
Merge pull request #804 from sched-ext/rustland-fixes
Browse files Browse the repository at this point in the history
scx_rustland fixes and improvements
  • Loading branch information
arighi authored Oct 16, 2024
2 parents 58093ea + 763da6a commit 2ea47af
Show file tree
Hide file tree
Showing 9 changed files with 187 additions and 394 deletions.
2 changes: 1 addition & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion rust/scx_rustland_core/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "scx_rustland_core"
version = "2.1.2"
version = "2.2.2"
edition = "2021"
authors = ["Andrea Righi <[email protected]>"]
license = "GPL-2.0-only"
Expand Down
45 changes: 21 additions & 24 deletions rust/scx_rustland_core/assets/bpf.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ use std::collections::HashMap;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::sync::Once;

use anyhow::Context;
use anyhow::Result;
Expand Down Expand Up @@ -82,7 +83,10 @@ pub struct QueuedTask {
pub cpu: i32, // CPU where the task is running
pub flags: u64, // task enqueue flags
pub sum_exec_runtime: u64, // Total cpu time
pub nvcsw: u64, // Total amount of voluntary context switches
pub weight: u64, // Task static priority
pub slice: u64, // Time slice budget
pub vtime: u64, // Current vruntime
cpumask_cnt: u64, // cpumask generation counter (private)
}

Expand All @@ -107,9 +111,9 @@ impl DispatchedTask {
pid: task.pid,
cpu: task.cpu,
flags: task.flags,
cpumask_cnt: task.cpumask_cnt,
slice_ns: 0, // use default time slice
vtime: 0,
cpumask_cnt: task.cpumask_cnt,
}
}
}
Expand Down Expand Up @@ -144,7 +148,10 @@ impl EnqueuedMessage {
cpu: self.inner.cpu,
flags: self.inner.flags,
sum_exec_runtime: self.inner.sum_exec_runtime,
nvcsw: self.inner.nvcsw,
weight: self.inner.weight,
slice: self.inner.slice,
vtime: self.inner.vtime,
cpumask_cnt: self.inner.cpumask_cnt,
}
}
Expand All @@ -155,7 +162,6 @@ pub struct BpfScheduler<'cb> {
shutdown: Arc<AtomicBool>, // Determine scheduler shutdown
queued: libbpf_rs::RingBuffer<'cb>, // Ring buffer of queued tasks
dispatched: libbpf_rs::UserRingBuffer, // User Ring buffer of dispatched tasks
cpu_hotplug_cnt: u64, // CPU hotplug generation counter
struct_ops: Option<libbpf_rs::Link>, // Low-level BPF methods
}

Expand Down Expand Up @@ -184,6 +190,18 @@ fn is_smt_active() -> std::io::Result<bool> {
Ok(smt_active == 1)
}

static SET_HANDLER: Once = Once::new();

fn set_ctrlc_handler(shutdown: Arc<AtomicBool>) -> Result<(), anyhow::Error> {
SET_HANDLER.call_once(|| {
let shutdown_clone = shutdown.clone();
ctrlc::set_handler(move || {
shutdown_clone.store(true, Ordering::Relaxed);
}).expect("Error setting Ctrl-C handler");
});
Ok(())
}

impl<'cb> BpfScheduler<'cb> {
pub fn init(
open_object: &'cb mut MaybeUninit<OpenObject>,
Expand All @@ -192,11 +210,7 @@ impl<'cb> BpfScheduler<'cb> {
debug: bool,
) -> Result<Self> {
let shutdown = Arc::new(AtomicBool::new(false));
let shutdown_clone = shutdown.clone();
ctrlc::set_handler(move || {
shutdown_clone.store(true, Ordering::Relaxed);
})
.context("Error setting Ctrl-C handler")?;
set_ctrlc_handler(shutdown.clone()).context("Error setting Ctrl-C handler")?;

// Open the BPF prog first for verification.
let mut skel_builder = BpfSkelBuilder::default();
Expand Down Expand Up @@ -283,7 +297,6 @@ impl<'cb> BpfScheduler<'cb> {
shutdown,
queued,
dispatched,
cpu_hotplug_cnt: 0,
struct_ops,
}),
err => Err(anyhow::Error::msg(format!(
Expand Down Expand Up @@ -377,29 +390,13 @@ impl<'cb> BpfScheduler<'cb> {
})
}

// Re-initialize the L2/L3 cache-domain information in the BPF component if a
// CPU hotplug event has happened since the last call.
//
// The BPF side bumps `cpu_hotplug_cnt` on every hotplug event; comparing it
// against our cached copy lets us skip the topology re-scan in the common
// case where nothing has changed.
fn refresh_cache_domains(&mut self) {
// Check if we need to refresh the CPU cache information.
if self.cpu_hotplug_cnt == self.skel.maps.bss_data.cpu_hotplug_cnt {
return;
}

// Re-initialize cache domains.
// NOTE(review): these unwrap()s abort the scheduler if the topology can't be
// read or the BPF maps can't be updated — presumably intentional, since
// continuing with stale cache domains would be wrong; confirm.
let topo = Topology::new().unwrap();
Self::init_l2_cache_domains(&mut self.skel, &topo).unwrap();
Self::init_l3_cache_domains(&mut self.skel, &topo).unwrap();

// Update CPU hotplug generation counter, so the next call becomes a no-op
// until another hotplug event occurs.
self.cpu_hotplug_cnt = self.skel.maps.bss_data.cpu_hotplug_cnt;
}

// Notify the BPF component that the user-space scheduler has completed its scheduling cycle,
// updating the amount of tasks that are still pending.
//
// NOTE: do not set allow(dead_code) for this method, any scheduler must use this method at
// some point, otherwise the BPF component will keep waking-up the user-space scheduler in a
// busy loop, causing unnecessary high CPU consumption.
pub fn notify_complete(&mut self, nr_pending: u64) {
// Pick up any CPU hotplug events before publishing the new pending count.
self.refresh_cache_domains();
// Shared .bss variable read by the BPF side: how many tasks the user-space
// scheduler still has queued, i.e. whether it needs to be woken up again.
self.skel.maps.bss_data.nr_scheduled = nr_pending;
// Yield the CPU so other runnable threads can make progress before the
// next scheduling cycle.
std::thread::yield_now();
}
Expand Down
5 changes: 4 additions & 1 deletion rust/scx_rustland_core/assets/bpf/intf.h
Original file line number Diff line number Diff line change
Expand Up @@ -83,9 +83,12 @@ struct queued_task_ctx {
s32 pid;
s32 cpu; /* CPU where the task is running */
u64 flags; /* task enqueue flags */
u64 cpumask_cnt; /* cpumask generation counter */
u64 sum_exec_runtime; /* Total cpu time */
u64 nvcsw; /* Total amount of voluntary context switches */
u64 weight; /* Task static priority */
u64 slice; /* Time slice budget */
u64 vtime; /* Current task's vruntime */
u64 cpumask_cnt; /* cpumask generation counter */
};

/*
Expand Down
Loading

0 comments on commit 2ea47af

Please sign in to comment.