Separated dcmconfig benchmark and ignoring it for now
zmckevitt committed Nov 12, 2023
1 parent 5226794 commit 54be99f
Showing 1 changed file with 204 additions and 63 deletions.
267 changes: 204 additions & 63 deletions kernel/tests/s11_rackscale_benchmarks.rs
@@ -468,9 +468,193 @@ struct MemcachedInternalConfig {
#[test]
#[cfg(not(feature = "baremetal"))]
fn s11_rackscale_memcached_benchmark_internal() {
rackscale_memcached_benchmark(RackscaleTransport::Shmem, None);
rackscale_memcached_benchmark(RackscaleTransport::Shmem);
}

#[cfg(not(feature = "baremetal"))]
fn rackscale_memcached_benchmark(transport: RackscaleTransport) {
let is_smoke = cfg!(feature = "smoke");

let file_name = format!(
"rackscale_{}_memcached_benchmark.csv",
transport.to_string(),
);
let _ignore = std::fs::remove_file(file_name.clone());

let built = BuildArgs::default()
.module("rkapps")
.user_feature("rkapps:memcached-bench")
.kernel_feature("pages-4k")
.release()
.build();

fn controller_match_fn(
proc: &mut PtySession,
output: &mut String,
_cores_per_client: usize,
num_clients: usize,
file_name: &str,
is_baseline: bool,
arg: Option<MemcachedInternalConfig>,
) -> Result<()> {
let _config = arg.expect("match function expects a memcached config");

// match the title
let (prev, matched) = proc.exp_regex(r#"INTERNAL BENCHMARK CONFIGURE"#)?;

*output += prev.as_str();
*output += matched.as_str();

// x_benchmark_mem = 10 MB
let (prev, matched) = proc.exp_regex(r#"x_benchmark_mem = (\d+) MB"#)?;
println!("> {}", matched);
let b_mem = matched.replace("x_benchmark_mem = ", "").replace(" MB", "");

*output += prev.as_str();
*output += matched.as_str();

// number of threads: 3
let (prev, matched) = proc.exp_regex(r#"number of threads: (\d+)"#)?;
println!("> {}", matched);
let b_threads = matched.replace("number of threads: ", "");

*output += prev.as_str();
*output += matched.as_str();

// number of keys: 131072
let (prev, matched) = proc.exp_regex(r#"number of keys: (\d+)"#)?;
println!("> {}", matched);

*output += prev.as_str();
*output += matched.as_str();

let (prev, matched) = proc.exp_regex(r#"Executing (\d+) queries with (\d+) threads"#)?;
println!("> {}", matched);

*output += prev.as_str();
*output += matched.as_str();

// benchmark took 129 ms
let (prev, matched) = proc.exp_regex(r#"benchmark took (\d+) ms"#)?;
println!("> {}", matched);
let b_time = matched.replace("benchmark took ", "").replace(" ms", "");

*output += prev.as_str();
*output += matched.as_str();

// benchmark took 7937984 queries / second
let (prev, matched) = proc.exp_regex(r#"benchmark took (\d+) queries / second"#)?;
println!("> {}", matched);
let b_thpt = matched
.replace("benchmark took ", "")
.replace(" queries / second", "");

*output += prev.as_str();
*output += matched.as_str();

let (prev, matched) = proc.exp_regex(r#"benchmark executed (\d+)"#)?;
println!("> {}", matched);
let b_queries = matched
.replace("benchmark executed ", "")
.split(" ")
.next()
.unwrap()
.to_string();

*output += prev.as_str();
*output += matched.as_str();

// Append parsed results to a CSV file
let write_headers = !Path::new(file_name).exists();
let mut csv_file = OpenOptions::new()
.append(true)
.create(true)
.open(file_name)
.expect("Can't open file");
if write_headers {
let row = "git_rev,benchmark,nthreads,mem,queries,time,thpt,num_clients,num_replicas\n";
let r = csv_file.write(row.as_bytes());
assert!(r.is_ok());
}

let actual_num_clients = if is_baseline { 0 } else { num_clients };
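// Baseline (non-rackscale) runs are recorded in the CSV as having 0 clients.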

let r = csv_file.write(format!("{},", env!("GIT_HASH")).as_bytes());
assert!(r.is_ok());
let out = format!(
"memcached,{},{},{},{},{},{},{}",
b_threads, b_mem, b_queries, b_time, b_thpt, actual_num_clients, num_clients
);
let r = csv_file.write(out.as_bytes());
assert!(r.is_ok());
let r = csv_file.write("\n".as_bytes());
assert!(r.is_ok());

Ok(())
}

let config = if is_smoke {
MemcachedInternalConfig {
num_queries: 100_000,
mem_size: 16,
}
} else {
MemcachedInternalConfig {
num_queries: 1_000_000, // TODO(rackscale): should be 100_000_000,
mem_size: 16, // TODO(rackscale): should be 32_000,
}
};

let mut test = RackscaleRun::new("userspace-smp".to_string(), built);
test.controller_match_fn = controller_match_fn;
test.transport = transport;
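// Double the default shared-memory size; affinity-shmem builds also request affinitized shmem and QEMU huge pages.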
test.shmem_size *= 2;
test.use_affinity_shmem = cfg!(feature = "affinity-shmem");
test.use_qemu_huge_pages = cfg!(feature = "affinity-shmem");
test.file_name = file_name.to_string();
test.arg = Some(config);
test.run_dhcpd_for_baseline = true;

fn cmd_fn(num_cores: usize, arg: Option<MemcachedInternalConfig>) -> String {
let config = arg.expect("missing memcached config");
format!(
r#"init=memcachedbench.bin initargs={} appcmd='--x-benchmark-mem={} --x-benchmark-queries={}'"#,
num_cores, config.mem_size, config.num_queries
)
}

fn baseline_timeout_fn(num_cores: usize) -> u64 {
120_000 + 500 * num_cores as u64
}

fn rackscale_timeout_fn(num_cores: usize) -> u64 {
240_000 + 5_000 * num_cores as u64
}

fn mem_fn(num_cores: usize, is_smoke: bool) -> usize {
if is_smoke {
8192
} else {
// Memory must also be divisible by number of nodes, which could be 1, 2, 3, or 4
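// i.e. round half the cores (rounded up) up to a multiple of 3, then allocate 2048 MB per unit: e.g. 4 cores -> 2 -> 3 units -> 6144 MB.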
2048 * (((((num_cores + 1) / 2) + 3 - 1) / 3) * 3)
}
}

let bench = RackscaleBench {
test,
cmd_fn,
baseline_timeout_fn,
rackscale_timeout_fn,
mem_fn,
};

if cfg!(feature = "baseline") {
bench.run_bench(true, is_smoke);
}
bench.run_bench(false, is_smoke);
}

#[ignore]
#[test]
#[cfg(not(feature = "baremetal"))]
fn s11_rackscale_dcmconfig_benchmark() {
@@ -489,19 +673,19 @@ fn s11_rackscale_dcmconfig_benchmark() {
let mut dcm_config = DCMConfig::default();

dcm_config.solver = DCMSolver::DCMloc;
rackscale_memcached_benchmark(RackscaleTransport::Shmem, Some(dcm_config.clone()));
rackscale_memcached_dcm(RackscaleTransport::Shmem, Some(dcm_config.clone()));

dcm_config.solver = DCMSolver::DCMcap;
rackscale_memcached_benchmark(RackscaleTransport::Shmem, Some(dcm_config.clone()));
rackscale_memcached_dcm(RackscaleTransport::Shmem, Some(dcm_config.clone()));

dcm_config.solver = DCMSolver::Random;
rackscale_memcached_benchmark(RackscaleTransport::Shmem, Some(dcm_config.clone()));
rackscale_memcached_dcm(RackscaleTransport::Shmem, Some(dcm_config.clone()));

dcm_config.solver = DCMSolver::RoundRobin;
rackscale_memcached_benchmark(RackscaleTransport::Shmem, Some(dcm_config.clone()));
rackscale_memcached_dcm(RackscaleTransport::Shmem, Some(dcm_config.clone()));

dcm_config.solver = DCMSolver::FillCurrent;
rackscale_memcached_benchmark(RackscaleTransport::Shmem, Some(dcm_config.clone()));
rackscale_memcached_dcm(RackscaleTransport::Shmem, Some(dcm_config.clone()));

let solvers = vec!["DCMloc", "DCMcap", "Random", "RoundRobin", "FillCurrent"];

@@ -519,7 +703,7 @@ fn s11_rackscale_dcmconfig_benchmark() {
}

#[cfg(not(feature = "baremetal"))]
fn rackscale_memcached_benchmark(transport: RackscaleTransport, dcm_config: Option<DCMConfig>) {
fn rackscale_memcached_dcm(transport: RackscaleTransport, dcm_config: Option<DCMConfig>) {
let is_smoke = cfg!(feature = "smoke");

let file_name = match dcm_config {
@@ -678,63 +862,20 @@ fn rackscale_memcached_benchmark(transport: RackscaleTransport, dcm_config: Opti
test.arg = Some(config);
test.run_dhcpd_for_baseline = true;

match dcm_config {
Some(dcm_config) => {
test.dcm_config = Some(dcm_config);

let machine = Machine::determine();
let max_cores = machine.max_cores();
let max_numa_nodes = machine.max_numa_nodes();

let cores_per_client = max_cores / max_numa_nodes;
test.num_clients = max_numa_nodes - 1;
test.cores_per_client = cores_per_client;
test.memory = 2048 * (((((cores_per_client + 1) / 2) + 3 - 1) / 3) * 3);
test.cmd = format!("init=memcachedbench.bin initargs={} appcmd=\'--x-benchmark-mem=16 --x-benchmark-queries=1000000\'", cores_per_client);
test.client_timeout = 120_000;
test.controller_timeout = 120_000;
test.run_rackscale();
}
None => {
fn cmd_fn(num_cores: usize, arg: Option<MemcachedInternalConfig>) -> String {
let config = arg.expect("missing leveldb config");
format!(
r#"init=memcachedbench.bin initargs={} appcmd='--x-benchmark-mem={} --x-benchmark-queries={}'"#,
num_cores, config.mem_size, config.num_queries
)
}

fn baseline_timeout_fn(num_cores: usize) -> u64 {
120_000 + 500 * num_cores as u64
}

fn rackscale_timeout_fn(num_cores: usize) -> u64 {
240_000 + 5_000 * num_cores as u64
}

fn mem_fn(num_cores: usize, is_smoke: bool) -> usize {
if is_smoke {
8192
} else {
// Memory must also be divisible by number of nodes, which could be 1, 2, 3, or 4
2048 * (((((num_cores + 1) / 2) + 3 - 1) / 3) * 3)
}
}

let bench = RackscaleBench {
test,
cmd_fn,
baseline_timeout_fn,
rackscale_timeout_fn,
mem_fn,
};
test.dcm_config = Some(dcm_config.unwrap());

if cfg!(feature = "baseline") {
bench.run_bench(true, is_smoke);
}
bench.run_bench(false, is_smoke);
}
};
let machine = Machine::determine();
let max_cores = machine.max_cores();
let max_numa_nodes = machine.max_numa_nodes();

let cores_per_client = max_cores / max_numa_nodes;
test.num_clients = max_numa_nodes - 1;
test.cores_per_client = cores_per_client;
test.memory = 2048 * (((((cores_per_client + 1) / 2) + 3 - 1) / 3) * 3);
test.cmd = format!("init=memcachedbench.bin initargs={} appcmd='--x-benchmark-mem=16 --x-benchmark-queries=1000000'", cores_per_client);
test.client_timeout = 120_000;
test.controller_timeout = 120_000;
test.run_rackscale();
}
}
