Skip to content

Commit

Permalink
Created rackscale runner framework for rackscale integration tests
Browse files Browse the repository at this point in the history
  • Loading branch information
hunhoffe committed Jul 17, 2023
1 parent 9fcc15e commit 7760fbd
Show file tree
Hide file tree
Showing 6 changed files with 335 additions and 191 deletions.
2 changes: 1 addition & 1 deletion kernel/build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ fn main() {
}

let output = Command::new("git")
.args(&["rev-parse", "HEAD"])
.args(["rev-parse", "HEAD"])
.output()
.expect("Could not determine git hash");
let git_hash = String::from_utf8(output.stdout).expect("Could not parse the git hash");
Expand Down
242 changes: 79 additions & 163 deletions kernel/tests/s06_rackscale_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,17 +13,22 @@ use std::sync::{mpsc::channel, Mutex};
use rexpect::errors::*;
use rexpect::process::signal::{SIGKILL, SIGTERM};
use rexpect::process::wait::WaitStatus;
use rexpect::session::PtySession;

use testutils::builder::{BuildArgs, Machine};
use testutils::helpers::{
get_shmem_names, notify_controller_of_termination, setup_network, spawn_dcm, spawn_nrk,
spawn_shmem_server, wait_for_client_termination, CLIENT_BUILD_DELAY, SHMEM_SIZE,
get_shmem_names, setup_network, spawn_dcm, spawn_nrk, spawn_shmem_server, CLIENT_BUILD_DELAY,
SHMEM_SIZE,
};
use testutils::runner_args::{
check_for_successful_exit, check_for_successful_exit_no_log, log_qemu_out_with_name,
wait_for_sigterm_or_successful_exit_no_log, RackscaleMode, RackscaleTransport, RunnerArgs,
};

use testutils::rackscale_runner::{
notify_of_termination, rackscale_runner, wait_for_termination, RackscaleRunState,
};

#[cfg(not(feature = "baremetal"))]
#[test]
fn s06_rackscale_shmem_userspace_smoke_test() {
Expand All @@ -38,152 +43,63 @@ fn s06_rackscale_ethernet_userspace_smoke_test() {

#[cfg(not(feature = "baremetal"))]
fn rackscale_userspace_smoke_test(transport: RackscaleTransport) {
use std::sync::Arc;
use std::thread::sleep;
use std::time::Duration;

let timeout = 60_000;

let (tx, rx) = channel();
let all_outputs = Arc::new(Mutex::new(Vec::new()));

setup_network(2);

let (shmem_socket0, shmem_file0) = get_shmem_names(Some(0), false);
let (shmem_socket1, shmem_file1) = get_shmem_names(Some(1), false);
let mut shmem_server0 = spawn_shmem_server(&shmem_socket0, &shmem_file0, SHMEM_SIZE, None)
.expect("Failed to start shmem server 0");
let mut shmem_server1 = spawn_shmem_server(&shmem_socket1, &shmem_file1, SHMEM_SIZE, None)
.expect("Failed to start shmem server 1");

let mut dcm = spawn_dcm(1).expect("Failed to start DCM");

// Create build for both controller and client
let build = Arc::new(
BuildArgs::default()
.module("init")
.user_features(&[
"test-print",
"test-map",
"test-alloc",
"test-upcall",
"test-scheduler",
"test-syscalls",
])
.kernel_feature("shmem")
.kernel_feature("ethernet")
.kernel_feature("rackscale")
.release()
.build(),
);

// Run DCM and controller in separate thread
let controller_output_array = all_outputs.clone();
let build1 = build.clone();
let shmem_sockets = vec![shmem_socket0.clone(), shmem_socket1.clone()];
let controller = std::thread::Builder::new()
.name("Controller".to_string())
.spawn(move || {
let cmdline_controller = RunnerArgs::new_with_build("userspace-smp", &build1)
.timeout(timeout)
.transport(transport)
.mode(RackscaleMode::Controller)
.shmem_size(vec![SHMEM_SIZE as usize; 2])
.shmem_path(shmem_sockets)
.tap("tap0")
.no_network_setup()
.workers(2)
.use_vmxnet3();

let mut output = String::new();
let mut qemu_run = || -> Result<WaitStatus> {
let mut p = spawn_nrk(&cmdline_controller)?;

// wait until the client is done
let _ = wait_for_client_termination::<()>(&rx);
let ret = p.process.kill(SIGTERM);
output += p.exp_eof()?.as_str();
ret
};
let ret = qemu_run();
controller_output_array
.lock()
.expect("Failed to get mutex to output array")
.push((String::from("Controller"), output));

// This will only find sigterm, that's okay
wait_for_sigterm_or_successful_exit_no_log(
&cmdline_controller,
ret,
String::from("Controller"),
);
})
.expect("Controller thread failed to start");

// Run client in separate thread. Wait a bit to make sure controller started
let client_output_array = all_outputs.clone();
let build2 = build.clone();
let shmem_sockets = vec![shmem_socket0.clone(), shmem_socket1.clone()];
let client = std::thread::Builder::new()
.name("Client".to_string())
.spawn(move || {
sleep(Duration::from_millis(CLIENT_BUILD_DELAY));
let cmdline_client = RunnerArgs::new_with_build("userspace-smp", &build2)
.timeout(timeout)
.transport(transport)
.mode(RackscaleMode::Client)
.shmem_size(vec![SHMEM_SIZE as usize; 2])
.shmem_path(shmem_sockets)
.tap("tap2")
.no_network_setup()
.workers(2)
.cores(1)
.nobuild()
.use_vmxnet3();

let mut output = String::new();
let mut qemu_run = || -> Result<WaitStatus> {
let mut p = spawn_nrk(&cmdline_client)?;
output += p.exp_string("print_test OK")?.as_str();
output += p.exp_string("upcall_test OK")?.as_str();
output += p.exp_string("map_test OK")?.as_str();
output += p.exp_string("alloc_test OK")?.as_str();
output += p.exp_string("scheduler_test OK")?.as_str();
output += p.exp_eof()?.as_str();

// notify controller we are done
notify_controller_of_termination(&tx);

p.process.exit()
};
let ret = qemu_run();
client_output_array
.lock()
.expect("Failed to get mutex for output array")
.push((String::from("Client"), output.clone()));
check_for_successful_exit_no_log(&cmdline_client, ret, String::from("Client"));
})
.expect("Client thread failed to start");

let client_ret = client.join();
let controller_ret = controller.join();

let _ignore = shmem_server0.send_control('c');
let _ignore = shmem_server1.send_control('c');
let _ignore = dcm.process.kill(SIGKILL);
let built = BuildArgs::default()
.module("init")
.user_features(&[
"test-print",
"test-map",
"test-alloc",
"test-upcall",
"test-scheduler",
"test-syscalls",
])
.kernel_feature("shmem")
.kernel_feature("ethernet")
.kernel_feature("rackscale")
.release()
.build();

/// Controller-side output matcher for the userspace smoke test.
///
/// Consumes the per-test "OK" lines from the controller's console in the
/// order the `init` userspace module emits them, appending the matched
/// console text to `output` for later logging on failure.
fn controller_match_function(
    proc: &mut PtySession,
    output: &mut String,
    _cores_per_client: usize,
    _num_clients: usize,
) -> Result<()> {
    // Expectation order must mirror the order the tests run in userspace;
    // exp_string blocks until each marker appears (or times out).
    for marker in [
        "print_test OK",
        "upcall_test OK",
        "map_test OK",
        "alloc_test OK",
        "scheduler_test OK",
    ] {
        output.push_str(proc.exp_string(marker)?.as_str());
    }
    Ok(())
}

// If there's been an error, print everything
if controller_ret.is_err() || client_ret.is_err() {
let outputs = all_outputs
.lock()
.expect("Failed to get mutex to output array");
for (name, output) in outputs.iter() {
log_qemu_out_with_name(None, name.to_string(), output.to_string());
}
/// Client-side output matcher for the userspace smoke test.
///
/// Intentionally a no-op: all test results are reported on the controller's
/// console (see the matching controller function), so there is nothing to
/// match on the client side.
fn client_match_function(
    _proc: &mut PtySession,
    _output: &mut String,
    _cores_per_client: usize,
    _num_clients: usize,
) -> Result<()> {
    // Do nothing
    Ok(())
}

client_ret.unwrap();
controller_ret.unwrap();
let test_run = RackscaleRunState {
controller_timeout: 60_000,
controller_memory: 1024,
controller_match_function,
client_timeout: 60_000,
client_memory: 1024,
client_match_function,
kernel_test: "userspace-smp".to_string(),
built,
num_clients: 1,
cores_per_client: 1,
shmem_size: SHMEM_SIZE,
use_affinity: false,
transport,
};

rackscale_runner(test_run);
}

#[cfg(not(feature = "baremetal"))]
Expand Down Expand Up @@ -238,7 +154,7 @@ fn s06_rackscale_phys_alloc_test() {
let mut qemu_run = || -> Result<WaitStatus> {
let mut p = spawn_nrk(&cmdline_controller)?;

let _ = wait_for_client_termination::<()>(&rx);
let _ = wait_for_termination::<()>(&rx);
let ret = p.process.kill(SIGTERM);
output += p.exp_eof()?.as_str();
ret
Expand Down Expand Up @@ -281,7 +197,7 @@ fn s06_rackscale_phys_alloc_test() {
let mut p = spawn_nrk(&cmdline_client)?;
output += p.exp_string("phys_alloc_test OK")?.as_str();
output += p.exp_eof()?.as_str();
notify_controller_of_termination(&tx);
notify_of_termination(&tx);
p.process.exit()
};
let ret = qemu_run();
Expand Down Expand Up @@ -383,7 +299,7 @@ fn rackscale_fs_test(transport: RackscaleTransport) {
let mut qemu_run = || -> Result<WaitStatus> {
let mut p = spawn_nrk(&cmdline_controller)?;

let _ = wait_for_client_termination::<()>(&rx);
let _ = wait_for_termination::<()>(&rx);
let ret = p.process.kill(SIGTERM);
output += p.exp_eof()?.as_str();
ret
Expand Down Expand Up @@ -428,7 +344,7 @@ fn rackscale_fs_test(transport: RackscaleTransport) {
let mut p = spawn_nrk(&cmdline_client)?;
output += p.exp_string("fs_test OK")?.as_str();
output += p.exp_eof()?.as_str();
notify_controller_of_termination(&tx);
notify_of_termination(&tx);
p.process.exit()
};
let ret = qemu_run();
Expand Down Expand Up @@ -515,7 +431,7 @@ fn s06_rackscale_shmem_fs_prop_test() {
let mut qemu_run = || -> Result<WaitStatus> {
let mut p = spawn_nrk(&cmdline_controller)?;

let _ = wait_for_client_termination::<()>(&rx);
let _ = wait_for_termination::<()>(&rx);
let ret = p.process.kill(SIGTERM);
output += p.exp_eof()?.as_str();
ret
Expand Down Expand Up @@ -558,7 +474,7 @@ fn s06_rackscale_shmem_fs_prop_test() {
let mut p = spawn_nrk(&cmdline_client)?;
output += p.exp_string("fs_prop_test OK")?.as_str();
output += p.exp_eof()?.as_str();
notify_controller_of_termination(&tx);
notify_of_termination(&tx);
p.process.exit()
};
let ret = qemu_run();
Expand Down Expand Up @@ -658,7 +574,7 @@ fn s06_rackscale_shmem_shootdown_test() {

// Notify the clients that all are done.
for _i in 0..clients {
notify_controller_of_termination(&tx);
notify_of_termination(&tx);
}
let ret = p.process.kill(SIGTERM);
output += p.exp_eof()?.as_str();
Expand Down Expand Up @@ -708,7 +624,7 @@ fn s06_rackscale_shmem_shootdown_test() {

// Wait for the shootdown client to complete
let rx = my_rx_mut.lock().expect("Failed to unwrap rx mutex");
wait_for_client_termination::<()>(&rx);
wait_for_termination::<()>(&rx);

let ret = p.process.kill(SIGTERM);
output += p.exp_eof()?.as_str();
Expand Down Expand Up @@ -829,7 +745,7 @@ fn rackscale_userspace_multicore_test(transport: RackscaleTransport) {
let mut qemu_run = || -> Result<WaitStatus> {
let mut p = spawn_nrk(&cmdline_controller)?;

let _ = wait_for_client_termination::<()>(&rx);
let _ = wait_for_termination::<()>(&rx);
let ret = p.process.kill(SIGTERM);
output += p.exp_eof()?.as_str();
ret
Expand Down Expand Up @@ -880,7 +796,7 @@ fn rackscale_userspace_multicore_test(transport: RackscaleTransport) {
output += r.0.as_str();
output += r.1.as_str();
}
notify_controller_of_termination(&tx);
notify_of_termination(&tx);
let ret = p.process.kill(SIGTERM);
output += p.exp_eof()?.as_str();
ret
Expand Down Expand Up @@ -1004,8 +920,8 @@ fn rackscale_userspace_multicore_multiclient(transport: RackscaleTransport) {
}

// Notify each client it's okay to shutdown
notify_controller_of_termination(&tx1);
notify_controller_of_termination(&tx2);
notify_of_termination(&tx1);
notify_of_termination(&tx2);

let ret = p.process.kill(SIGTERM);
output += p.exp_eof()?.as_str();
Expand Down Expand Up @@ -1063,7 +979,7 @@ fn rackscale_userspace_multicore_multiclient(transport: RackscaleTransport) {
}

// Wait for controller to terminate
let _ = wait_for_client_termination::<()>(&rx1);
let _ = wait_for_termination::<()>(&rx1);

let ret = p.process.kill(SIGTERM);
output += p.exp_eof()?.as_str();
Expand Down Expand Up @@ -1120,7 +1036,7 @@ fn rackscale_userspace_multicore_multiclient(transport: RackscaleTransport) {
}

// Wait for controller to terminate
let _ = wait_for_client_termination::<()>(&rx2);
let _ = wait_for_termination::<()>(&rx2);

let ret = p.process.kill(SIGTERM);
output += p.exp_eof()?.as_str();
Expand Down Expand Up @@ -1231,7 +1147,7 @@ fn rackscale_userspace_rumprt_fs(transport: RackscaleTransport) {
let mut qemu_run = || -> Result<WaitStatus> {
let mut p = spawn_nrk(&cmdline_controller)?;

let _ = wait_for_client_termination::<()>(&rx);
let _ = wait_for_termination::<()>(&rx);
let ret = p.process.kill(SIGTERM);
output += p.exp_eof()?.as_str();
ret
Expand Down Expand Up @@ -1277,7 +1193,7 @@ fn rackscale_userspace_rumprt_fs(transport: RackscaleTransport) {
p.exp_string("bytes_written: 12")?;
p.exp_string("bytes_read: 12")?;
output = p.exp_eof()?;
notify_controller_of_termination(&tx);
notify_of_termination(&tx);
p.process.exit()
};
let ret = qemu_run();
Expand Down Expand Up @@ -1373,7 +1289,7 @@ fn s06_rackscale_controller_shmem_alloc() {

// Notify the clients that all are done.
for _i in 0..clients {
notify_controller_of_termination(&tx);
notify_of_termination(&tx);
}
p.process.exit()
};
Expand Down Expand Up @@ -1409,7 +1325,7 @@ fn s06_rackscale_controller_shmem_alloc() {

// Wait for the shootdown client to complete
let rx = my_rx_mut.lock().expect("Failed to unwrap rx mutex");
wait_for_client_termination::<()>(&rx);
wait_for_termination::<()>(&rx);

let ret = p.process.kill(SIGTERM);
let _ = p.exp_eof()?.as_str();
Expand Down
Loading

0 comments on commit 7760fbd

Please sign in to comment.