refactor(bench): improve code style (#14062)

Co-authored-by: Lucas Fernandes Nogueira <lucas@tauri.app>
Author: Kushal Meghani, 2025-09-01 23:48:20 +05:30 (committed by GitHub)
parent 2a06d10066
commit a58d461eb0
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)

@@ -2,81 +2,87 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
-//! This Rust binary runs on CI and provides internal metrics results of Tauri. To learn more see [benchmark_results](https://github.com/tauri-apps/benchmark_results) repository.
+//! This Rust binary runs on CI and provides internal metrics results of Tauri.
+//! To learn more see [benchmark_results](https://github.com/tauri-apps/benchmark_results) repository.
//!
-//! ***_Internal use only_**
+//! ***_Internal use only_***
#![doc(
html_logo_url = "https://github.com/tauri-apps/tauri/raw/dev/.github/icon.png",
html_favicon_url = "https://github.com/tauri-apps/tauri/raw/dev/.github/icon.png"
)]
-use anyhow::Result;
+use anyhow::{Context, Result};
use std::{
collections::{HashMap, HashSet},
env,
path::Path,
process::{Command, Stdio},
time::Instant,
};
mod utils;
-/// The list of the examples of the benchmark name and binary relative path
-fn get_all_benchmarks() -> Vec<(String, String)> {
+/// The list of examples for benchmarks
+fn get_all_benchmarks(target: &str) -> Vec<(String, String)> {
vec![
(
"tauri_hello_world".into(),
format!("../target/{}/release/bench_helloworld", utils::get_target()),
format!("../target/{target}/release/bench_helloworld"),
),
(
"tauri_cpu_intensive".into(),
-format!(
-"../target/{}/release/bench_cpu_intensive",
-utils::get_target()
-),
+format!("../target/{target}/release/bench_cpu_intensive"),
),
(
"tauri_3mb_transfer".into(),
-format!(
-"../target/{}/release/bench_files_transfer",
-utils::get_target()
-),
+format!("../target/{target}/release/bench_files_transfer"),
),
]
}
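The relative paths above resolve under `../target/<triple>/release`, where `target` is the cargo target triple the benchmark binaries were built for. A minimal sketch of a resolver in the spirit of `utils::get_target()`; the triples and the fallback here are illustrative assumptions, not the actual utils implementation:

```rust
// Hypothetical sketch of a target-triple resolver like utils::get_target();
// the real helper may differ. The triple names the ../target/<triple>/release
// directory that the bench binaries were built into.
fn get_target_sketch() -> &'static str {
    if cfg!(all(target_os = "linux", target_arch = "x86_64")) {
        "x86_64-unknown-linux-gnu"
    } else if cfg!(all(target_os = "macos", target_arch = "aarch64")) {
        "aarch64-apple-darwin"
    } else if cfg!(all(target_os = "windows", target_arch = "x86_64")) {
        "x86_64-pc-windows-msvc"
    } else {
        "unknown" // fallback for brevity; a real resolver would cover more targets
    }
}
```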
-fn run_strace_benchmarks(new_data: &mut utils::BenchResult) -> Result<()> {
+fn run_strace_benchmarks(new_data: &mut utils::BenchResult, target: &str) -> Result<()> {
use std::io::Read;
let mut thread_count = HashMap::<String, u64>::new();
let mut syscall_count = HashMap::<String, u64>::new();
-for (name, example_exe) in get_all_benchmarks() {
-let mut file = tempfile::NamedTempFile::new()?;
+for (name, example_exe) in get_all_benchmarks(target) {
+let mut file = tempfile::NamedTempFile::new()
+.context("failed to create temporary file for strace output")?;
+let exe_path = utils::bench_root_path().join(&example_exe);
+let exe_path_str = exe_path
+.to_str()
+.context("executable path contains invalid UTF-8")?;
+let temp_path_str = file
+.path()
+.to_str()
+.context("temporary file path contains invalid UTF-8")?;
Command::new("strace")
-.args([
-"-c",
-"-f",
-"-o",
-file.path().to_str().unwrap(),
-utils::bench_root_path().join(example_exe).to_str().unwrap(),
-])
+.args(["-c", "-f", "-o", temp_path_str, exe_path_str])
.stdout(Stdio::inherit())
-.spawn()?
-.wait()?;
+.spawn()
+.context("failed to spawn strace process")?
+.wait()
+.context("failed to wait for strace process")?;
let mut output = String::new();
-file.as_file_mut().read_to_string(&mut output)?;
+file
+.as_file_mut()
+.read_to_string(&mut output)
+.context("failed to read strace output")?;
let strace_result = utils::parse_strace_output(&output);
-// Note, we always have 1 thread. Use cloneX calls as counter for additional threads created.
-let clone = 1
-+ strace_result.get("clone").map(|d| d.calls).unwrap_or(0)
+// Count clone/clone3 syscalls as thread creation indicators
+let clone_calls = strace_result.get("clone").map(|d| d.calls).unwrap_or(0)
+ strace_result.get("clone3").map(|d| d.calls).unwrap_or(0);
-let total = strace_result.get("total").unwrap().calls;
-thread_count.insert(name.to_string(), clone);
-syscall_count.insert(name.to_string(), total);
+if let Some(total) = strace_result.get("total") {
+thread_count.insert(name.clone(), clone_calls);
+syscall_count.insert(name, total.calls);
+}
}
new_data.thread_count = thread_count;
@@ -85,69 +91,100 @@ fn run_strace_benchmarks(new_data: &mut utils::BenchResult) -> Result<()> {
Ok(())
}
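For reference, `strace -c -f` writes a per-syscall summary table (one row per syscall plus a `total` row), and `utils::parse_strace_output` is expected to reduce it to call counts keyed by syscall name, which is all the code above consumes. A minimal sketch of that parsing, assuming the standard `strace -c` column layout; the names here are hypothetical, not the real utils API:

```rust
use std::collections::HashMap;

// Hypothetical stand-in for the per-syscall data utils::parse_strace_output returns.
struct SyscallStats {
    calls: u64,
}

// Sketch parser for the strace -c summary, e.g.:
//   % time     seconds  usecs/call     calls    errors syscall
//    28.40    0.003389          12       270           mmap
//   100.00    0.011934                  2565        73 total
fn parse_strace_summary(output: &str) -> HashMap<String, SyscallStats> {
    let mut stats = HashMap::new();
    for line in output.lines() {
        let fields: Vec<&str> = line.split_whitespace().collect();
        if fields.len() < 5 {
            continue; // skip headers, separators, and blank lines
        }
        let name = *fields.last().unwrap();
        // The "total" row leaves usecs/call empty, shifting "calls" one field left.
        let calls_idx = if name == "total" { 2 } else { 3 };
        if let Ok(calls) = fields[calls_idx].parse::<u64>() {
            stats.insert(name.to_string(), SyscallStats { calls });
        }
    }
    stats
}
```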
-fn run_max_mem_benchmark() -> Result<HashMap<String, u64>> {
+fn run_max_mem_benchmark(target: &str) -> Result<HashMap<String, u64>> {
let mut results = HashMap::<String, u64>::new();
-for (name, example_exe) in get_all_benchmarks() {
+for (name, example_exe) in get_all_benchmarks(target) {
let benchmark_file = utils::target_dir().join(format!("mprof{name}_.dat"));
-let benchmark_file = benchmark_file.to_str().unwrap();
+let benchmark_file_str = benchmark_file
+.to_str()
+.context("benchmark file path contains invalid UTF-8")?;
+let exe_path = utils::bench_root_path().join(&example_exe);
+let exe_path_str = exe_path
+.to_str()
+.context("executable path contains invalid UTF-8")?;
let proc = Command::new("mprof")
-.args([
-"run",
-"-C",
-"-o",
-benchmark_file,
-utils::bench_root_path().join(example_exe).to_str().unwrap(),
-])
+.args(["run", "-C", "-o", benchmark_file_str, exe_path_str])
.stdout(Stdio::null())
.stderr(Stdio::piped())
-.spawn()?;
+.spawn()
+.with_context(|| format!("failed to spawn mprof for benchmark {name}"))?;
-let proc_result = proc.wait_with_output()?;
-println!("{proc_result:?}");
-if let Some(max_mem) = utils::parse_max_mem(benchmark_file)? {
-results.insert(name.to_string(), max_mem);
+let proc_result = proc
+.wait_with_output()
+.with_context(|| format!("failed to wait for mprof {name}"))?;
+if !proc_result.status.success() {
+eprintln!(
+"mprof failed for {name}: {}",
+String::from_utf8_lossy(&proc_result.stderr)
+);
+}
+if let Some(mem) = utils::parse_max_mem(benchmark_file_str)
+.with_context(|| format!("failed to parse mprof data for {name}"))?
+{
+results.insert(name, mem);
+}
+// Clean up the temporary file
+if let Err(e) = std::fs::remove_file(&benchmark_file) {
+eprintln!("Warning: failed to remove temporary file {benchmark_file_str}: {e}");
+}
}
Ok(results)
}
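`mprof` (from the Python memory_profiler package) samples memory usage into a plain-text `.dat` file, conventionally one `MEM <MiB> <timestamp>` line per sample, and `utils::parse_max_mem` is expected to reduce that to a peak value. A minimal sketch under that format assumption (hypothetical helper, not the actual utils code):

```rust
// Hypothetical sketch: extract the peak memory sample, in bytes, from
// mprof .dat contents. Assumes "MEM <MiB> <timestamp>" sample lines;
// this mirrors what utils::parse_max_mem is expected to do.
fn parse_max_mem_sketch(contents: &str) -> Option<u64> {
    contents
        .lines()
        .filter_map(|line| {
            let mut parts = line.split_whitespace();
            match (parts.next(), parts.next()) {
                (Some("MEM"), Some(mib)) => mib.parse::<f64>().ok(),
                _ => None, // ignore CMDLINE and any other record types
            }
        })
        // Keep the largest sample, then convert MiB to bytes.
        .fold(None::<f64>, |max, mib| Some(max.map_or(mib, |m| m.max(mib))))
        .map(|mib| (mib * 1024.0 * 1024.0) as u64)
}
```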
-fn rlib_size(target_dir: &std::path::Path, prefix: &str) -> u64 {
+fn rlib_size(target_dir: &Path, prefix: &str) -> Result<u64> {
let mut size = 0;
-let mut seen = std::collections::HashSet::new();
+let mut seen = HashSet::new();
let deps_dir = target_dir.join("deps");
for entry in std::fs::read_dir(&deps_dir).with_context(|| {
format!(
"failed to read target deps directory: {}",
deps_dir.display()
)
})? {
let entry = entry.context("failed to read directory entry")?;
let name = entry.file_name().to_string_lossy().to_string();
for entry in std::fs::read_dir(target_dir.join("deps")).unwrap() {
let entry = entry.unwrap();
let os_str = entry.file_name();
let name = os_str.to_str().unwrap();
if name.starts_with(prefix) && name.ends_with(".rlib") {
-let start = name.split('-').next().unwrap().to_string();
-if seen.contains(&start) {
-println!("skip {name}");
-} else {
-seen.insert(start);
-size += entry.metadata().unwrap().len();
-println!("check size {name} {size}");
+if let Some(start) = name.split('-').next() {
+if seen.insert(start.to_string()) {
+size += entry
+.metadata()
+.context("failed to read file metadata")?
+.len();
+}
+}
}
}
-assert!(size > 0);
-size
+if size == 0 {
+anyhow::bail!(
+"no rlib files found for prefix {prefix} in {}",
+deps_dir.display()
+);
+}
+Ok(size)
}
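The dedup key in `rlib_size` is everything before the first `-` because cargo names rlibs `deps/lib<crate>-<metadata hash>.rlib`, and stale builds can leave several hashes for the same crate; each crate should only count once toward the size. A small illustration with a hypothetical helper:

```rust
// Illustrative only: the dedup key rlib_size derives from a file name.
fn crate_key(file_name: &str) -> Option<&str> {
    let stem = file_name.strip_suffix(".rlib")?;
    stem.split('-').next()
}

fn main() {
    assert_eq!(crate_key("libwry-1a2b3c4d.rlib"), Some("libwry"));
    // A stale artifact with an older hash yields the same key, so it is skipped.
    assert_eq!(crate_key("libwry-9f8e7d6c.rlib"), Some("libwry"));
    assert_eq!(crate_key("notes.txt"), None);
}
```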
-fn get_binary_sizes(target_dir: &Path) -> Result<HashMap<String, u64>> {
+fn get_binary_sizes(target_dir: &Path, target: &str) -> Result<HashMap<String, u64>> {
let mut sizes = HashMap::<String, u64>::new();
-let wry_size = rlib_size(target_dir, "libwry");
-println!("wry {wry_size} bytes");
+let wry_size = rlib_size(target_dir, "libwry")?;
sizes.insert("wry_rlib".to_string(), wry_size);
// add size for all EXEC_TIME_BENCHMARKS
-for (name, example_exe) in get_all_benchmarks() {
-let meta = std::fs::metadata(example_exe).unwrap();
-sizes.insert(name.to_string(), meta.len());
+for (name, example_exe) in get_all_benchmarks(target) {
+let exe_path = utils::bench_root_path().join(&example_exe);
+let meta = std::fs::metadata(&exe_path)
+.with_context(|| format!("failed to read metadata for {}", exe_path.display()))?;
+sizes.insert(name, meta.len());
}
Ok(sizes)
@@ -187,14 +224,33 @@ fn cargo_deps() -> HashMap<String, usize> {
cmd.args(["--target", target]);
cmd.current_dir(utils::tauri_root_path());
-let full_deps = cmd.output().expect("failed to run cargo tree").stdout;
-let full_deps = String::from_utf8(full_deps).expect("cargo tree output not utf-8");
-let count = full_deps.lines().collect::<HashSet<_>>().len() - 1; // output includes wry itself
+match cmd.output() {
+Ok(output) if output.status.success() => {
+let full_deps = String::from_utf8_lossy(&output.stdout);
+let count = full_deps
+.lines()
+.collect::<HashSet<_>>()
+.len()
+.saturating_sub(1); // output includes wry itself
-// set the count to the highest count seen for this OS
-let existing = results.entry(os.to_string()).or_default();
-*existing = count.max(*existing);
-assert!(count > 10); // sanity check
+// set the count to the highest count seen for this OS
+let existing = results.entry(os.to_string()).or_default();
+*existing = count.max(*existing);
+if count <= 10 {
+eprintln!("Warning: dependency count for {target} seems low: {count}");
+}
+}
+Ok(output) => {
+eprintln!(
+"cargo tree failed for {target}: {}",
+String::from_utf8_lossy(&output.stderr)
+);
+}
+Err(e) => {
+eprintln!("Failed to run cargo tree for {target}: {e}");
+}
+}
}
}
results
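The counting rule used above: `cargo tree` prints one line per dependency node with the root crate included, so the number of unique lines minus one approximates the dependency count. Restated as a standalone sketch:

```rust
use std::collections::HashSet;

// Standalone restatement of the dependency-count logic above: unique
// `cargo tree` output lines, minus one for the root crate itself.
fn count_unique_deps(cargo_tree_stdout: &str) -> usize {
    cargo_tree_stdout
        .lines()
        .collect::<HashSet<_>>()
        .len()
        .saturating_sub(1)
}

fn main() {
    // Shape of the output only; not real data.
    let out = "wry v0.0.0\n├── dep-a v1.0.0\n└── dep-b v2.0.0";
    assert_eq!(count_unique_deps(out), 2);
}
```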
@@ -202,104 +258,127 @@ fn cargo_deps() -> HashMap<String, usize> {
const RESULT_KEYS: &[&str] = &["mean", "stddev", "user", "system", "min", "max"];
-fn run_exec_time(target_dir: &Path) -> Result<HashMap<String, HashMap<String, f64>>> {
+fn run_exec_time(target: &str) -> Result<HashMap<String, HashMap<String, f64>>> {
+let target_dir = utils::target_dir();
let benchmark_file = target_dir.join("hyperfine_results.json");
-let benchmark_file = benchmark_file.to_str().unwrap();
+let benchmark_file_str = benchmark_file
+.to_str()
+.context("benchmark file path contains invalid UTF-8")?;
-let mut command = [
+let mut command = vec![
"hyperfine",
"--export-json",
-benchmark_file,
+benchmark_file_str,
"--show-output",
"--warmup",
"3",
-]
-.iter()
-.map(|s| s.to_string())
-.collect::<Vec<_>>();
+];
-for (_, example_exe) in get_all_benchmarks() {
-command.push(
-utils::bench_root_path()
-.join(example_exe)
-.to_str()
-.unwrap()
-.to_string(),
-);
-utils::run(&command.iter().map(|s| s.as_ref()).collect::<Vec<_>>())?;
+let benchmarks = get_all_benchmarks(target);
+let mut benchmark_paths = Vec::new();
+for (_, example_exe) in &benchmarks {
+let exe_path = utils::bench_root_path().join(example_exe);
+let exe_path_str = exe_path
+.to_str()
+.context("executable path contains invalid UTF-8")?;
+benchmark_paths.push(exe_path_str.to_string());
+}
+for path in &benchmark_paths {
+command.push(path.as_str());
+}
+utils::run(&command)?;
let mut results = HashMap::<String, HashMap<String, f64>>::new();
-let hyperfine_results = utils::read_json(benchmark_file)?;
-for ((name, _), data) in get_all_benchmarks().iter().zip(
-hyperfine_results
-.as_object()
-.unwrap()
-.get("results")
-.unwrap()
-.as_array()
-.unwrap(),
-) {
-let data = data.as_object().unwrap().clone();
-results.insert(
-name.to_string(),
-data
-.into_iter()
-.filter(|(key, _)| RESULT_KEYS.contains(&key.as_str()))
-.map(|(key, val)| (key, val.as_f64().unwrap()))
-.collect(),
-);
+let hyperfine_results = utils::read_json(benchmark_file_str)?;
+if let Some(results_array) = hyperfine_results
+.as_object()
+.and_then(|obj| obj.get("results"))
+.and_then(|val| val.as_array())
+{
+for ((name, _), data) in benchmarks.iter().zip(results_array.iter()) {
+if let Some(data_obj) = data.as_object() {
+let filtered_data: HashMap<String, f64> = data_obj
+.iter()
+.filter(|(key, _)| RESULT_KEYS.contains(&key.as_str()))
+.filter_map(|(key, val)| val.as_f64().map(|v| (key.clone(), v)))
+.collect();
+results.insert(name.clone(), filtered_data);
+}
+}
+}
Ok(results)
}
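The parsing above relies on the shape of hyperfine's `--export-json` file: a top-level `results` array whose entries carry the statistics named in `RESULT_KEYS`. A minimal check of that shape (the values are made up):

```rust
// Minimal illustration of the hyperfine --export-json shape consumed above.
fn main() -> serde_json::Result<()> {
    let raw = r#"{
        "results": [{
            "command": "bench_helloworld",
            "mean": 0.42, "stddev": 0.01,
            "user": 0.30, "system": 0.08,
            "min": 0.40, "max": 0.45
        }]
    }"#;
    let v: serde_json::Value = serde_json::from_str(raw)?;
    assert_eq!(v["results"][0]["mean"].as_f64(), Some(0.42));
    Ok(())
}
```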
fn main() -> Result<()> {
// download big files if not present
let json_3mb = utils::home_path().join(".tauri_3mb.json");
if !json_3mb.exists() {
println!("Downloading test data...");
utils::download_file(
"https://github.com/lemarier/tauri-test/releases/download/v2.0.0/json_3mb.json",
json_3mb,
-)?;
+)
+.context("failed to download test data")?;
}
println!("Starting tauri benchmark");
let target_dir = utils::target_dir();
+let target = utils::get_target();
-env::set_current_dir(utils::bench_root_path())?;
+env::set_current_dir(utils::bench_root_path())
+.context("failed to set working directory to bench root")?;
+let now = std::time::SystemTime::now()
+.duration_since(std::time::UNIX_EPOCH)
+.context("failed to get current time")?;
+let timestamp = format!("{}", now.as_secs());
+println!("Running execution time benchmarks...");
+let exec_time = run_exec_time(&target)?;
+println!("Getting binary sizes...");
+let binary_size = get_binary_sizes(&target_dir, &target)?;
+println!("Analyzing cargo dependencies...");
+let cargo_deps = cargo_deps();
-let format =
-time::format_description::parse("[year]-[month]-[day]T[hour]:[minute]:[second]Z").unwrap();
-let now = time::OffsetDateTime::now_utc();
let mut new_data = utils::BenchResult {
-created_at: now.format(&format).unwrap(),
-sha1: utils::run_collect(&["git", "rev-parse", "HEAD"])?
-.0
-.trim()
-.to_string(),
-exec_time: run_exec_time(&target_dir)?,
-binary_size: get_binary_sizes(&target_dir)?,
-cargo_deps: cargo_deps(),
+created_at: timestamp,
+sha1: {
+let output = utils::run_collect(&["git", "rev-parse", "HEAD"])?;
+output.0.trim().to_string()
+},
+exec_time,
+binary_size,
+cargo_deps,
..Default::default()
};
if cfg!(target_os = "linux") {
-run_strace_benchmarks(&mut new_data)?;
-new_data.max_memory = run_max_mem_benchmark()?;
+println!("Running Linux-specific benchmarks...");
+run_strace_benchmarks(&mut new_data, &target)?;
+new_data.max_memory = run_max_mem_benchmark(&target)?;
}
println!("===== <BENCHMARK RESULTS>");
-serde_json::to_writer_pretty(std::io::stdout(), &new_data)?;
+serde_json::to_writer_pretty(std::io::stdout(), &new_data)
+.context("failed to serialize benchmark results")?;
println!("\n===== </BENCHMARK RESULTS>");
if let Some(filename) = target_dir.join("bench.json").to_str() {
utils::write_json(filename, &serde_json::to_value(&new_data)?)?;
let bench_file = target_dir.join("bench.json");
if let Some(filename) = bench_file.to_str() {
utils::write_json(filename, &serde_json::to_value(&new_data)?)
.context("failed to write benchmark results to file")?;
println!("Results written to: {}", filename);
} else {
eprintln!("Cannot write bench.json, path is invalid");
eprintln!("Cannot write bench.json, path contains invalid UTF-8");
}
Ok(())