diff --git a/.gitignore b/.gitignore index 7bee6b1e..2fcb0f9c 100644 --- a/.gitignore +++ b/.gitignore @@ -13,4 +13,5 @@ result* completions/ man/ memory-bank/ -security-logs/ \ No newline at end of file +security-logs/ +benchmark-summary.* \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index bfddd475..ecca517d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -28,6 +28,21 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "anstream" version = "0.6.19" @@ -243,6 +258,12 @@ dependencies = [ "thiserror 2.0.12", ] +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cc" version = "1.2.27" @@ -266,6 +287,33 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "cipher" version = "0.4.4" @@ -401,6 +449,70 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bf7af66b0989381bd0be551bd7cc91912a655a58c6918420c9527b1fd8b4679" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "itertools 0.13.0", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools 0.10.5", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crunchy" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" + [[package]] name = "crypto-common" version = "0.1.6" @@ -478,6 +590,12 @@ dependencies = [ "syn", ] +[[package]] +name = "either" +version = "1.15.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + [[package]] name = "equivalent" version = "1.0.2" @@ -681,6 +799,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +dependencies = [ + "cfg-if", + "crunchy", +] + [[package]] name = "hashbrown" version = "0.14.5" @@ -957,6 +1085,24 @@ version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.15" @@ -1173,6 +1319,12 @@ version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + [[package]] name = "openssl-probe" version = "0.1.6" @@ -1255,6 +1407,34 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + [[package]] name = "potential_utf" version = "0.1.2" @@ -1397,6 +1577,26 @@ dependencies = [ "getrandom 0.3.3", ] +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + [[package]] name = "redox_syscall" version = "0.5.13" @@ -1406,6 +1606,35 @@ dependencies = [ "bitflags 2.9.1", ] +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + [[package]] name = "reqwest" version = "0.12.20" @@ -1546,6 +1775,7 @@ dependencies = [ "clap_complete", "clap_complete_nushell", "clap_mangen", + "criterion", "flate2", "libc", "libmimalloc-sys", @@ -1575,6 +1805,15 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + [[package]] name = "schannel" version = "0.1.27" @@ -1916,6 +2155,16 @@ dependencies = [ "zerovec", ] +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "1.9.0" @@ -2224,6 +2473,16 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" version = "0.3.1" @@ -2355,6 +2614,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.59.0", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index 31ea4050..724b366b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,20 +12,20 @@ keywords = ["visualization", "ownership", "lifetime", "lsp"] categories = ["development-tools", "visualization"] [dependencies] -serde = { version = "1.0.219", features = ["derive"] } -serde_json = "1.0.140" -log = "0.4.27" -simple_logger = { version = "5.0.0", features = ["stderr"] } -tokio = { version = "1.45.1", features = ["rt", "rt-multi-thread", "macros", "sync", "time", "io-std", "io-util", "process", "fs"] } -tower-lsp = "0.20.0" -process_alive = "0.1.1" -cargo_metadata = "0.20.0" +serde = { version = "1", features = ["derive"] } +serde_json = "1" +log = "0.4" +simple_logger = { version = "5", features = ["stderr"] } +tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros", "sync", "time", "io-std", "io-util", "process", "fs"] } +tower-lsp = "0.20" +process_alive = "0.1" +cargo_metadata = "0.20" uuid = { version = "1", features = ["v4"] } -clap = { version = "4.5.40", features = ["cargo", "derive"] } -reqwest = { version = "0.12.19", default-features = false, features = ["http2", "rustls-tls-native-roots"] } -clap_complete_nushell = "4.5.7" -clap_complete = "4.5.54" -flate2 = "1.1.2" +clap = { version = "4", features = ["cargo", "derive"] } +flate2 = "1" +reqwest = { version = "0.12", default-features = false, features = ["http2", "rustls-tls-native-roots"] } +clap_complete_nushell = "4" +clap_complete = "4" [target.'cfg(not(target_env = "msvc"))'.dependencies] mimalloc = { version = "0.1", default-features = false, features = ['extended'] } @@ -39,10 +39,17 @@ mimalloc = { version = "0.1", default-features = false, features = ['extended', libmimalloc-sys = { version = "0.1", default-features = false, features = ['extended', 'override'] } 
[build-dependencies] -clap_complete_nushell = "4.5.7" -clap_complete = "4.5.54" -clap_mangen = "0.2.27" -clap = { version = "4.5.40", features = ["derive"] } +clap_complete_nushell = "4" +clap_complete = "4" +clap_mangen = "0.2" +clap = { version = "4", features = ["derive"] } + +[dev-dependencies] +criterion = { version = "0.6", features = ["html_reports"] } + +[[bench]] +name = "rustowl_bench_simple" +harness = false [profile.release] opt-level = 3 @@ -59,7 +66,7 @@ inherits = "release" lto = "off" [profile.security] -inherits = "dev" +inherits = "release" debug = true debug-assertions = true overflow-checks = true diff --git a/benches/rustowl_bench_simple.rs b/benches/rustowl_bench_simple.rs new file mode 100644 index 00000000..cc8595f5 --- /dev/null +++ b/benches/rustowl_bench_simple.rs @@ -0,0 +1,87 @@ +use criterion::{Criterion, criterion_group, criterion_main}; +use std::hint::black_box; +use std::process::Command; +use std::time::Duration; + +fn bench_rustowl_check(c: &mut Criterion) { + let dummy_package = "./perf-tests/dummy-package"; + + let mut group = c.benchmark_group("rustowl_check"); + group + .sample_size(10) + .measurement_time(Duration::from_secs(50)) + .warm_up_time(Duration::from_secs(3)); + + // Ensure rustowl binary is built + let output = Command::new("cargo") + .args(["build", "--release", "--bin", "rustowl"]) + .output() + .expect("Failed to build rustowl"); + + if !output.status.success() { + panic!( + "Failed to build rustowl: {}", + String::from_utf8_lossy(&output.stderr) + ); + } + + let binary_path = "./target/release/rustowl"; + + group.bench_function("default", |b| { + b.iter(|| { + let output = Command::new(binary_path) + .args(["check", dummy_package]) + .output() + .expect("Failed to run rustowl check"); + black_box(output.status.success()); + }) + }); + + group.bench_function("all_targets", |b| { + b.iter(|| { + let output = Command::new(binary_path) + .args(["check", dummy_package, "--all-targets"]) + .output() + 
.expect("Failed to run rustowl check with all targets"); + black_box(output.status.success()); + }) + }); + + group.bench_function("all_features", |b| { + b.iter(|| { + let output = Command::new(binary_path) + .args(["check", dummy_package, "--all-features"]) + .output() + .expect("Failed to run rustowl check with all features"); + black_box(output.status.success()); + }) + }); + + group.finish(); +} + +fn bench_rustowl_comprehensive(c: &mut Criterion) { + let dummy_package = "./perf-tests/dummy-package"; + let binary_path = "./target/release/rustowl"; + + let mut group = c.benchmark_group("rustowl_comprehensive"); + group + .sample_size(10) + .measurement_time(Duration::from_secs(50)) + .warm_up_time(Duration::from_secs(3)); + + group.bench_function("comprehensive", |b| { + b.iter(|| { + let output = Command::new(binary_path) + .args(["check", dummy_package, "--all-targets", "--all-features"]) + .output() + .expect("Failed to run comprehensive rustowl check"); + black_box(output.status.success()); + }) + }); + + group.finish(); +} + +criterion_group!(benches, bench_rustowl_check, bench_rustowl_comprehensive); +criterion_main!(benches); diff --git a/docs/perf-tests.md b/docs/perf-tests.md deleted file mode 100644 index e9591e0f..00000000 --- a/docs/perf-tests.md +++ /dev/null @@ -1,311 +0,0 @@ -# perf-tests.sh - -**Performance testing and analysis for RustOwl** - -## NAME - -`perf-tests.sh` - Performance testing and analysis for RustOwl - -## SYNOPSIS - -```bash -perf-tests.sh [OPTION]... -``` - -## DESCRIPTION - -The `perf-tests.sh` script provides comprehensive performance testing and analysis for RustOwl. It measures various performance metrics of the current code state, including binary size, execution time, memory usage, symbol counts, and advanced analysis features. 
- -The script is designed to help developers understand and validate the performance characteristics of their code with flexible metric selection, configurable test runs, and multiple output formats. - -## OPTIONS - -### Modes - -- `-h, --help` - Display help information and exit -- `--verify` - Check the testing environment without running performance tests. Shows system information, tool availability, and enabled metrics. -- `--prepare` - Build the project without running tests. Ensures the binary is ready for testing. -- `--test` - Run performance tests (default mode). Measures selected metrics and displays results. - -### Test Configuration - -- `--runs N` - Number of test runs to perform (default: 3). Results are averaged across multiple runs for better accuracy. -- `--warm` - Skip cache clearing between runs. Tests performance with system caches intact. -- `--cold` - Clear caches between runs (default). Provides consistent measurements by eliminating cache effects. -- `--force` - Force rebuild even if binary exists. Ensures fresh compilation. - -### RustOwl Check Options - -- `--all-targets` - Pass `--all-targets` to rustowl check command. Enables analysis of all targets (bins, examples, tests, benches) instead of just the current target. -- `--all-features` - Pass `--all-features` to rustowl check command. Enables analysis with all available features instead of just the default features. - -### Metrics Selection - -By default, only size and time metrics are enabled. Specifying any metric flag disables defaults and enables only the requested metrics. 
- -- `--full` - Enable all available metrics -- `--size` - Binary size analysis -- `--time` - Execution time measurement -- `--symbols` - Symbol count analysis (requires nm) -- `--strip` - Strip analysis showing debug symbol overhead -- `--memory` - Memory usage measurement -- `--page-faults` - Page fault counting -- `--context-switches` - Context switch measurement -- `--static` - Static analysis metrics -- `--profile-memory` - Memory profiling with valgrind (Linux only) -- `--profile-cpu` - CPU profiling recommendations - -### Output Options - -- `--markdown FILE` - Save results to markdown file instead of stdout -- `--compare FILE` - Compare current results with previous markdown results file - -## TESTING MODES - -### Default Mode (--test) - -When run without mode arguments, performs performance testing: - -1. Builds the project using specific Rust toolchain -2. Runs performance measurements for selected metrics -3. Averages results across multiple runs -4. Displays summary and optionally saves to markdown - -### System Verification (--verify) - -Check environment readiness: - -1. Verify required tools are available -2. Show optional tool availability -3. 
Display enabled metrics configuration - -## MEASUREMENTS - -The script provides comprehensive analysis across multiple dimensions: - -### Binary Analysis - -- Binary size measurement with human-readable formatting -- Symbol count analysis (requires `nm`) -- Strip analysis showing debug symbol overhead -- Static analysis including binary entropy and dependencies - -### Performance Metrics - -- Execution time (wall clock) -- Peak memory usage (RSS) -- Page faults (major/minor) -- Context switches (voluntary/involuntary) - -### Advanced Analysis - -When optional tools are available: - -- Statistical benchmarking with confidence intervals (`hyperfine`) -- Memory profiling with heap analysis (`valgrind` on Linux) -- CPU profiling recommendations for platform-specific tools - -## BUILD SYSTEM - -The script uses a specific Rust build configuration: - -### Build Command - -```bash -RUSTC_BOOTSTRAP=1 rustup run 1.87.0 cargo build --release -``` - -This ensures consistent compilation with Rust 1.87.0 stable compiler while enabling nightly features through RUSTC_BOOTSTRAP. - -### Binary Location - -Tests are performed directly on the binary in `./target/release/rustowl` without copying or moving files. - -### Test Command - -The script executes different rustowl check commands based on the selected options: - -- **Default**: `rustowl check ./perf-tests/dummy-package` -- **All Targets**: `rustowl check --all-targets ./perf-tests/dummy-package` -- **All Features**: `rustowl check --all-features ./perf-tests/dummy-package` -- **Comprehensive**: `rustowl check --all-targets --all-features ./perf-tests/dummy-package` - -This allows for performance comparison between different analysis modes. - -## CACHE MANAGEMENT - -Performance measurements can be significantly affected by various system caches. 
The script provides cache management options: - -### Cache Types - -- Filesystem caches (page cache) - Most significant impact -- Rust incremental compilation cache -- CPU caches (L1/L2/L3 and branch prediction) - -### Cache Clearing Methods - -- macOS: `purge` command (may require sudo) -- Linux: `/proc/sys/vm/drop_caches` (requires sudo) -- Rust: `cargo clean` for incremental cache - -Use `--cold` (default) for consistent measurements or `--warm` for realistic cached performance. - -## RUSTOWL ANALYSIS MODES - -The script supports testing different RustOwl analysis modes to measure performance impact: - -### Default Analysis Mode - -By default, RustOwl performs a fast analysis with standard settings: -- Analyzes only the current target (typically the main binary) -- Uses only default features enabled in Cargo.toml - -### Comprehensive Analysis Mode - -Use `--all-targets` and/or `--all-features` for thorough analysis: - -- `--all-targets`: Analyzes all targets including binaries, examples, tests, and benchmarks -- `--all-features`: Analyzes code with all available features enabled, not just defaults - -### Performance Comparison - -These modes allow benchmarking the performance trade-offs: - -```bash -# Fast analysis (default) -perf-tests.sh --test --runs 5 --markdown fast-analysis.md - -# Comprehensive analysis -perf-tests.sh --test --all-targets --all-features --runs 5 --markdown thorough-analysis.md -``` - -Comprehensive analysis typically takes longer but provides more complete coverage of potential issues. 
- -## DEPENDENCIES - -### Required Tools - -- `git` - Version control operations -- `cargo` - Rust build system -- `bc` - Mathematical calculations -- `rustup` - Rust toolchain management - -### Optional Tools (Enhanced Features) - -- `hyperfine` - Statistical benchmarking -- `valgrind` - Memory profiling (Linux only) -- `perf` - CPU profiling (Linux only) -- `nm` - Symbol analysis -- `instruments` - CPU profiling (macOS with Xcode) - -## EXAMPLES - -### Basic Testing - -```bash -# Run basic performance test with default metrics (size + time) -perf-tests.sh --test - -# Check system readiness and tool availability -perf-tests.sh --verify - -# Run comprehensive test with all available metrics -perf-tests.sh --test --full -``` - -### Focused Testing - -```bash -# Test memory usage and symbol counts with 5 runs for accuracy -perf-tests.sh --test --memory --symbols --runs 5 - -# Test execution time with warm caches (realistic performance) -perf-tests.sh --test --warm --time - -# Test with comprehensive RustOwl analysis (all targets and features) -perf-tests.sh --test --all-targets --all-features - -# Compare performance of default vs comprehensive analysis -perf-tests.sh --test --runs 3 --markdown default.md -perf-tests.sh --test --all-targets --all-features --runs 3 --markdown comprehensive.md -``` - -### Output and Comparison - -```bash -# Save results to markdown file for documentation -perf-tests.sh --test --markdown results.md - -# Compare current performance with previous results -perf-tests.sh --test --compare baseline.md -``` - -## RESULT INTERPRETATION - -### Normal Results - -- Consistent results across multiple runs -- Reasonable execution times and memory usage -- Expected symbol counts for binary size - -### Investigate Further - -- Extreme performance variations between runs -- Unusually high memory usage or execution time -- Zero page faults (suggests measurement issues) - -### Best Practices - -- Run `--verify` first to check environment -- Use multiple 
runs for important measurements -- Test on quiet systems with minimal background processes -- Consider both cold and warm cache performance -- Save results to markdown for tracking over time -- Test both default and comprehensive analysis modes to understand performance trade-offs -- Use `--all-targets --all-features` to benchmark worst-case analysis performance -- Compare analysis modes with consistent run counts for accurate benchmarking - -## OUTPUT FORMATS - -### Standard Output - -Default format displays metrics to stdout with colored output and progress indicators. - -### Markdown Output - -Use `--markdown FILE` to generate structured markdown reports suitable for documentation and tracking. - -### Comparison Output - -Use `--compare FILE` to compare current results with previously saved markdown results, showing differences and changes. - -## EXIT STATUS - -- **0** - Success -- **1** - General error (missing tools, build failure, binary verification failure) - -## FILES - -- `./target/release/rustowl` - Main binary tested by the script -- `./target/release/rustowlc` - Compiler binary (also built but not directly tested) - -## ENVIRONMENT - -- `RUSTC_BOOTSTRAP` - Set to 1 for stable compiler with nightly features -- `CARGO_TARGET_DIR` - Cargo build directory (default: target/) -- `NO_COLOR` - Disable colored output - -## NOTES - -This script is designed for development environments and focuses on testing the current code state without git-based comparisons. It prioritizes reproducibility and comprehensive metrics over raw speed. - -For accurate performance measurements, consider running multiple iterations and using appropriate cache management options based on your testing goals. 
- -## SEE ALSO - -- `cargo(1)` -- `rustup(1)` -- `hyperfine(1)` -- `valgrind(1)` - -RustOwl documentation: docs/CONTRIBUTING.md diff --git a/perf-tests/perf-tests.sh b/perf-tests/perf-tests.sh deleted file mode 100755 index 478d2894..00000000 --- a/perf-tests/perf-tests.sh +++ /dev/null @@ -1,1112 +0,0 @@ -#!/bin/bash -# RustOwl Performance Testing Script -# Tests current code state with configurable metrics and output formats - -set -e - -# Color codes for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -BOLD='\033[1m' -NC='\033[0m' # No Color - -# Global configuration -OS=$(uname) -RUNS=3 -COLD_CACHE=1 -OUTPUT_FORMAT="stdio" # stdio, markdown -OUTPUT_FILE="" -COMPARE_FILE="" -FORCE_BUILD=0 -MODE="test" - -# RustOwl check command flags -CHECK_ALL_TARGETS=0 -CHECK_ALL_FEATURES=0 - -# Metric flags (default: size, time, and memory) -METRIC_SIZE=1 -METRIC_TIME=1 -METRIC_SYMBOLS=0 -METRIC_STRIP=0 -METRIC_MEMORY=1 -METRIC_PAGE_FAULTS=0 -METRIC_CONTEXT_SWITCHES=1 -METRIC_STATIC=0 -METRIC_PROFILE_MEMORY=0 -METRIC_PROFILE_CPU=0 - -# Available tools detection -HAS_HYPERFINE=0 -HAS_VALGRIND=0 -HAS_NM=0 -HAS_PERF=0 -HAS_INSTRUMENTS=0 - -# Build configuration -RUST_VERSION="1.87.0" -BUILD_CMD="RUSTC_BOOTSTRAP=1 rustup run $RUST_VERSION cargo build --release" -BINARY_PATH="./target/release/rustowl" -TEST_TARGET_PATH="./perf-tests/dummy-package" - -# Test results storage for aggregation -declare -a SIZE_RESULTS=() -declare -a TIME_RESULTS=() -declare -a MEMORY_RESULTS=() -declare -a SYMBOLS_RESULTS=() -declare -a PAGE_FAULT_RESULTS=() -declare -a CONTEXT_SWITCH_RESULTS=() - -usage() { - echo "Usage: $0 [OPTIONS]" - echo "" - echo "Modes:" - echo " --verify Check system readiness and tool availability" - echo " --prepare Build the project without running tests" - echo " --test Run performance tests (default if no mode specified)" - echo "" - echo "Test Configuration:" - echo " --runs N Number of test runs (default: 3)" - echo " --warm Skip 
cache clearing between runs" - echo " --cold Clear caches between runs (default)" - echo " --force Force rebuild even if binary exists" - echo "" - echo "RustOwl Check Options:" - echo " --all-targets Pass --all-targets to rustowl check command" - echo " --all-features Pass --all-features to rustowl check command" - echo "" - echo "Metrics (default: size,time,memory):" - echo " --full Enable all available metrics" - echo " --size Binary size analysis" - echo " --time Execution time measurement" - echo " --symbols Symbol count analysis" - echo " --strip Strip analysis (debug symbol overhead)" - echo " --memory Memory usage measurement" - echo " --page-faults Page fault analysis" - echo " --context-switches Context switch measurement" - echo " --static Static analysis metrics" - echo " --profile-memory Memory profiling (valgrind)" - echo " --profile-cpu CPU profiling recommendations" - echo "" - echo "Output:" - echo " --stdout Output to standard output (default)" - echo " --markdown FILE Output results to markdown file" - echo " --compare FILE Compare with existing markdown results" - echo "" - echo "Examples:" - echo " $0 # Basic test (size + time, 3 runs, cold)" - echo " $0 --verify # Check system readiness" - echo " $0 --full --runs 5 # Full metrics, 5 runs" - echo " $0 --all-targets --all-features # Test with comprehensive analysis" - echo " $0 --markdown results.md # Save results to file" - echo " $0 --compare old-results.md # Compare with previous results" - echo " $0 --memory --warm --runs 1 # Memory test, warm cache, single run" - echo "" - echo "Test Package:" - echo " The script uses a dummy Rust package located at './perf-tests/dummy-package'" - echo " for consistent performance testing. This package contains various Rust" - echo " patterns that rustowl can analyze, including potential ownership issues," - echo " error handling patterns, and resource management scenarios." 
- echo "" -} - -enable_all_metrics() { - METRIC_SIZE=1 - METRIC_TIME=1 - METRIC_SYMBOLS=1 - METRIC_STRIP=1 - METRIC_MEMORY=1 - METRIC_PAGE_FAULTS=1 - METRIC_CONTEXT_SWITCHES=1 - METRIC_STATIC=1 - METRIC_PROFILE_MEMORY=1 - METRIC_PROFILE_CPU=1 -} - -reset_default_metrics() { - METRIC_SIZE=1 - METRIC_TIME=1 - METRIC_MEMORY=1 -} - -# Parse command line arguments -parse_args() { - local mode_specified=0 - local metrics_specified=0 - local all_args=("$@") - - while [[ $# -gt 0 ]]; do - case $1 in - -h|--help) - usage - exit 0 - ;; - --verify) - MODE="verify" - mode_specified=1 - shift - ;; - --prepare) - MODE="prepare" - mode_specified=1 - shift - ;; - --test) - MODE="test" - mode_specified=1 - shift - ;; - --runs) - RUNS="$2" - if ! [[ "$RUNS" =~ ^[0-9]+$ ]] || [ "$RUNS" -lt 1 ]; then - echo -e "${RED}Error: --runs must be a positive integer${NC}" - exit 1 - fi - shift 2 - ;; - --warm) - COLD_CACHE=0 - shift - ;; - --cold) - COLD_CACHE=1 - shift - ;; - --force) - FORCE_BUILD=1 - shift - ;; - --all-targets) - CHECK_ALL_TARGETS=1 - shift - ;; - --all-features) - CHECK_ALL_FEATURES=1 - shift - ;; - --full) - enable_all_metrics - metrics_specified=1 - shift - ;; - --size) - if [ $metrics_specified -eq 0 ]; then - reset_default_metrics - metrics_specified=1 - fi - METRIC_SIZE=1 - shift - ;; - --time) - if [ $metrics_specified -eq 0 ]; then - reset_default_metrics - metrics_specified=1 - fi - METRIC_TIME=1 - shift - ;; - --symbols) - if [ $metrics_specified -eq 0 ]; then - reset_default_metrics - metrics_specified=1 - fi - METRIC_SYMBOLS=1 - shift - ;; - --strip) - if [ $metrics_specified -eq 0 ]; then - reset_default_metrics - metrics_specified=1 - fi - METRIC_STRIP=1 - shift - ;; - --memory) - if [ $metrics_specified -eq 0 ]; then - reset_default_metrics - metrics_specified=1 - fi - METRIC_MEMORY=1 - shift - ;; - --page-faults) - if [ $metrics_specified -eq 0 ]; then - reset_default_metrics - metrics_specified=1 - fi - METRIC_PAGE_FAULTS=1 - shift - ;; - 
--context-switches) - if [ $metrics_specified -eq 0 ]; then - reset_default_metrics - metrics_specified=1 - fi - METRIC_CONTEXT_SWITCHES=1 - shift - ;; - --static) - if [ $metrics_specified -eq 0 ]; then - reset_default_metrics - metrics_specified=1 - fi - METRIC_STATIC=1 - shift - ;; - --profile-memory) - if [ $metrics_specified -eq 0 ]; then - reset_default_metrics - metrics_specified=1 - fi - METRIC_PROFILE_MEMORY=1 - shift - ;; - --profile-cpu) - if [ $metrics_specified -eq 0 ]; then - reset_default_metrics - metrics_specified=1 - fi - METRIC_PROFILE_CPU=1 - shift - ;; - --stdout) - OUTPUT_FORMAT="stdio" - shift - ;; - --markdown) - OUTPUT_FORMAT="markdown" - OUTPUT_FILE="$2" - if [ -z "$OUTPUT_FILE" ]; then - echo -e "${RED}Error: --markdown requires a filename${NC}" - exit 1 - fi - shift 2 - ;; - --compare) - COMPARE_FILE="$2" - if [ -z "$COMPARE_FILE" ] || [ ! -f "$COMPARE_FILE" ]; then - echo -e "${RED}Error: --compare requires an existing markdown file${NC}" - exit 1 - fi - shift 2 - ;; - *) - echo -e "${RED}Unknown option: $1${NC}" - usage - exit 1 - ;; - esac - done - - # Set default mode if none specified - if [ $mode_specified -eq 0 ]; then - MODE="test" - fi -} - -# Detect available tools -detect_tools() { - if command -v hyperfine >/dev/null 2>&1; then - HAS_HYPERFINE=1 - fi - if command -v valgrind >/dev/null 2>&1; then - HAS_VALGRIND=1 - fi - if command -v nm >/dev/null 2>&1; then - HAS_NM=1 - fi - if command -v perf >/dev/null 2>&1; then - HAS_PERF=1 - fi - if command -v instruments >/dev/null 2>&1; then - HAS_INSTRUMENTS=1 - fi -} - -# Check required tools -check_required_tools() { - local missing_tools=() - - command -v rustup >/dev/null 2>&1 || missing_tools+=("rustup") - command -v cargo >/dev/null 2>&1 || missing_tools+=("cargo") - command -v bc >/dev/null 2>&1 || missing_tools+=("bc") - - # Check if specific Rust version is available - if ! 
rustup run "$RUST_VERSION" rustc --version >/dev/null 2>&1; then - missing_tools+=("rust-$RUST_VERSION") - fi - - if [ ${#missing_tools[@]} -ne 0 ]; then - echo -e "${RED}Error: Missing required tools: ${missing_tools[*]}${NC}" - echo "Install missing tools:" - for tool in "${missing_tools[@]}"; do - case $tool in - rustup) echo " curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh" ;; - cargo) echo " (included with rustup)" ;; - bc) echo " macOS: brew install bc | Linux: apt/yum install bc" ;; - rust-$RUST_VERSION) echo " rustup install $RUST_VERSION" ;; - esac - done - exit 1 - fi -} - -print_header() { - if [ "$OUTPUT_FORMAT" = "stdio" ]; then - echo -e "${BLUE}${BOLD}RustOwl Performance Testing${NC}" - echo -e "${BLUE}===========================${NC}" - echo "OS: $OS" - echo "Rust version: $RUST_VERSION" - echo "Runs: $RUNS" - echo "Cache mode: $([ $COLD_CACHE -eq 1 ] && echo "Cold" || echo "Warm")" - echo "Mode: $MODE" - echo "" - fi -} - -# Clear system caches -clear_caches() { - if [ $COLD_CACHE -eq 0 ]; then - return - fi - - case "$OS" in - Darwin) - if command -v purge >/dev/null 2>&1; then - echo " Clearing macOS caches..." - if purge 2>/dev/null; then - echo " ✓ System cache cleared" - else - echo " ℹ System cache clearing skipped (would require sudo)" - fi - fi - ;; - Linux) - echo " Clearing Linux filesystem cache..." - if command -v sudo >/dev/null 2>&1; then - sync - if echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null 2>&1; then - echo " ✓ System cache cleared" - sudo ldconfig 2>/dev/null || true - else - echo " ℹ System cache clearing skipped (would require root)" - fi - fi - ;; - esac - - # Clear RustOwl analysis cache for main project - if [ -d "./target/owl" ]; then - echo " Clearing RustOwl analysis cache..." 
- rm -rf ./target/owl 2>/dev/null || true - echo " ✓ RustOwl analysis cache cleared" - fi - - # Clear RustOwl analysis cache for dummy package - if [ -d "$TEST_TARGET_PATH/target/owl" ]; then - echo " Clearing dummy package RustOwl analysis cache..." - rm -rf "$TEST_TARGET_PATH/target/owl" 2>/dev/null || true - echo " ✓ Dummy package RustOwl analysis cache cleared" - fi - - # Clear Rust incremental compilation cache for main project - if [ -d "./target" ]; then - echo " Clearing Rust incremental cache..." - rm -rf ./target/release/incremental 2>/dev/null || true - echo " ✓ Rust incremental cache cleared" - fi - - # Clear Rust incremental compilation cache for dummy package - if [ -d "$TEST_TARGET_PATH/target" ]; then - echo " Clearing dummy package Rust incremental cache..." - rm -rf "$TEST_TARGET_PATH/target/incremental" 2>/dev/null || true - rm -rf "$TEST_TARGET_PATH/target/debug/incremental" 2>/dev/null || true - echo " ✓ Dummy package Rust incremental cache cleared" - fi - - sleep 1 -} - -verify_system() { - echo -e "${BLUE}System Verification${NC}" - echo "==================" - - check_required_tools - detect_tools - - echo -e "${GREEN}✓ Required tools available${NC}" - echo "" - - echo "Tool availability:" - echo " rustup: ✓" - echo " cargo: ✓" - echo " bc: ✓" - echo " rust $RUST_VERSION: ✓" - echo " hyperfine: $([ $HAS_HYPERFINE -eq 1 ] && echo "✓" || echo "✗")" - echo " valgrind: $([ $HAS_VALGRIND -eq 1 ] && echo "✓" || echo "✗")" - echo " nm: $([ $HAS_NM -eq 1 ] && echo "✓" || echo "✗")" - echo " perf: $([ $HAS_PERF -eq 1 ] && echo "✓" || echo "✗")" - echo " instruments: $([ $HAS_INSTRUMENTS -eq 1 ] && echo "✓" || echo "✗")" - echo "" - - echo "Enabled metrics:" - [ $METRIC_SIZE -eq 1 ] && echo " ✓ Binary size" - [ $METRIC_TIME -eq 1 ] && echo " ✓ Execution time" - [ $METRIC_SYMBOLS -eq 1 ] && echo " ✓ Symbol analysis" - [ $METRIC_STRIP -eq 1 ] && echo " ✓ Strip analysis" - [ $METRIC_MEMORY -eq 1 ] && echo " ✓ Memory usage" - [ $METRIC_PAGE_FAULTS -eq 1 ] 
&& echo " ✓ Page faults" - [ $METRIC_CONTEXT_SWITCHES -eq 1 ] && echo " ✓ Context switches" - [ $METRIC_STATIC -eq 1 ] && echo " ✓ Static analysis" - [ $METRIC_PROFILE_MEMORY -eq 1 ] && echo " ✓ Memory profiling" - [ $METRIC_PROFILE_CPU -eq 1 ] && echo " ✓ CPU profiling" - echo "" - - if [ ! -z "$OUTPUT_FILE" ]; then - echo "Output file: $OUTPUT_FILE" - fi - - if [ ! -z "$COMPARE_FILE" ]; then - echo "Comparison file: $COMPARE_FILE" - fi - - echo "" - - echo "Test package verification:" - if [ -d "$TEST_TARGET_PATH" ]; then - echo " ✓ Test package directory exists: $TEST_TARGET_PATH" - if [ -f "$TEST_TARGET_PATH/Cargo.toml" ]; then - echo " ✓ Test package Cargo.toml found" - else - echo " ✗ Test package Cargo.toml missing" - fi - else - echo " ✗ Test package directory missing: $TEST_TARGET_PATH" - fi -} - -build_project() { - echo -e "${BLUE}Building Project${NC}" - echo "================" - - # Check if rebuild is needed - if [ -f "$BINARY_PATH" ] && [ $FORCE_BUILD -eq 0 ]; then - echo -e "${YELLOW}Binary exists. Use --force to rebuild.${NC}" - return 0 - fi - - echo "Build command: $BUILD_CMD" - echo "" - - # Clean previous build if forced - if [ $FORCE_BUILD -eq 1 ]; then - echo "Cleaning previous build..." - cargo clean --release 2>/dev/null || true - fi - - echo "Building release binary..." - if ! eval "$BUILD_CMD"; then - echo -e "${RED}✗ Build failed${NC}" - exit 1 - fi - - if [ ! -f "$BINARY_PATH" ]; then - echo -e "${RED}✗ Binary not found after build: $BINARY_PATH${NC}" - exit 1 - fi - - # Verify binary works - if ! "$BINARY_PATH" --version >/dev/null 2>&1; then - echo -e "${RED}✗ Binary verification failed${NC}" - exit 1 - fi - - echo -e "${GREEN}✓ Build completed successfully${NC}" - echo "" -} - -prepare_test_package() { - echo -e "${BLUE}Preparing test package${NC}" - echo "=====================" - - if [ ! -d "$TEST_TARGET_PATH" ]; then - echo -e "${RED}✗ Test package directory not found: $TEST_TARGET_PATH${NC}" - exit 1 - fi - - if [ ! 
# ---------------------------------------------------------------------------
# Metric measurement functions.
# Each measure_* helper is a no-op unless its METRIC_* flag is 1; results
# are appended to the *_RESULTS arrays so calculate_averages can summarize
# multi-run sessions.
# ---------------------------------------------------------------------------

# Record the release binary's size in bytes (and echo a human-readable size).
measure_size() {
    if [ $METRIC_SIZE -eq 0 ]; then return; fi

    # stat -f%z is BSD/macOS; stat -c%s is GNU/Linux.
    local size_bytes
    size_bytes=$(stat -f%z "$BINARY_PATH" 2>/dev/null || stat -c%s "$BINARY_PATH" 2>/dev/null)
    local size_human
    size_human=$(ls -lh "$BINARY_PATH" | awk '{print $5}')

    SIZE_RESULTS+=("$size_bytes")
    echo "Binary size: $size_human ($size_bytes bytes)"
}

# Count linker symbols via nm (skipped when nm is unavailable).
measure_symbols() {
    if [ $METRIC_SYMBOLS -eq 0 ] || [ $HAS_NM -eq 0 ]; then return; fi

    local symbols
    symbols=$(nm "$BINARY_PATH" 2>/dev/null | wc -l | tr -d ' ')
    SYMBOLS_RESULTS+=("$symbols")
    echo "Symbol count: $symbols"
}

# Report how much smaller the binary would be after strip(1).
# FIX: use mktemp instead of a predictable /tmp name (safe against symlink
# attacks and concurrent runs), quote all path expansions, and guard the
# percentage math so an unreadable binary cannot cause a bc/arithmetic error.
measure_strip() {
    if [ $METRIC_STRIP -eq 0 ]; then return; fi

    local temp_binary
    temp_binary=$(mktemp "${TMPDIR:-/tmp}/$(basename "$BINARY_PATH")_stripped.XXXXXX") || return
    cp "$BINARY_PATH" "$temp_binary"
    strip "$temp_binary" 2>/dev/null

    local original_size stripped_size
    original_size=$(stat -f%z "$BINARY_PATH" 2>/dev/null || stat -c%s "$BINARY_PATH" 2>/dev/null)
    stripped_size=$(stat -f%z "$temp_binary" 2>/dev/null || stat -c%s "$temp_binary" 2>/dev/null)

    if [ -n "$original_size" ] && [ -n "$stripped_size" ] && [ "$original_size" -gt 0 ]; then
        local savings=$((original_size - stripped_size))
        local savings_percent
        savings_percent=$(echo "scale=1; ($savings * 100.0) / $original_size" | bc -l)
        echo "Stripped size: $(ls -lh "$temp_binary" | awk '{print $5}') (saves $savings_percent%)"
    else
        echo "Stripped size: Unable to measure"
    fi
    rm -f "$temp_binary"
}

# Compose the `rustowl check` invocation, honoring --all-targets/--all-features.
build_check_command() {
    local cmd="$BINARY_PATH check"

    if [ $CHECK_ALL_TARGETS -eq 1 ]; then
        cmd="$cmd --all-targets"
    fi

    if [ $CHECK_ALL_FEATURES -eq 1 ]; then
        cmd="$cmd --all-features"
    fi

    cmd="$cmd $TEST_TARGET_PATH"
    echo "$cmd"
}

# Time the check command and harvest OS-level resource counters.
# FIX: the Darwin branch previously did arithmetic on values scraped from
# `/usr/bin/time -l` without checking they were non-empty (the Linux branch
# already guarded), so an unparsable line aborted the run with a bash
# arithmetic error. Both branches now degrade to "Unable to measure".
# FIX: Linux execution-time output gains the "s" unit suffix for
# consistency with the Darwin branch.
measure_time_and_resources() {
    if [ $METRIC_TIME -eq 0 ] && [ $METRIC_MEMORY -eq 0 ] && [ $METRIC_PAGE_FAULTS -eq 0 ] && [ $METRIC_CONTEXT_SWITCHES -eq 0 ]; then
        return
    fi

    clear_caches

    case "$OS" in
        Darwin)
            # Manual wall-clock timing keeps stdout clean; /usr/bin/time -l
            # is only invoked when system counters were requested.
            if [ $METRIC_TIME -eq 1 ]; then
                local start_time end_time real_time
                start_time=$(date +%s.%N)
                eval "$(build_check_command)" >/dev/null 2>&1 || true
                end_time=$(date +%s.%N)
                real_time=$(echo "scale=2; $end_time - $start_time" | bc -l)
                TIME_RESULTS+=("$real_time")
                echo "Execution time: ${real_time}s"
            fi

            if [ $METRIC_MEMORY -eq 1 ] || [ $METRIC_PAGE_FAULTS -eq 1 ] || [ $METRIC_CONTEXT_SWITCHES -eq 1 ]; then
                # time(1) reports on stderr; keep it apart from the command's
                # own output so parsing is not polluted.
                local time_stderr cmd_output
                time_stderr=$(mktemp)
                cmd_output=$(mktemp)

                eval "/usr/bin/time -l $(build_check_command)" >"$cmd_output" 2>"$time_stderr" || true

                local time_output
                time_output=$(cat "$time_stderr")
                rm -f "$time_stderr" "$cmd_output"

                if [ $METRIC_MEMORY -eq 1 ]; then
                    local memory_bytes
                    memory_bytes=$(echo "$time_output" | grep "maximum resident set size" | awk '{print $1}')
                    if [ -n "$memory_bytes" ]; then
                        local memory_mb=$((memory_bytes / 1024 / 1024))
                        MEMORY_RESULTS+=("$memory_mb")
                        echo "Peak memory: ${memory_mb} MB (${memory_bytes} bytes)"
                    else
                        echo "Peak memory: Unable to measure"
                    fi
                fi

                if [ $METRIC_PAGE_FAULTS -eq 1 ]; then
                    local page_faults
                    page_faults=$(echo "$time_output" | grep "page reclaims" | awk '{print $1}')
                    if [ -n "$page_faults" ]; then
                        PAGE_FAULT_RESULTS+=("$page_faults")
                        echo "Page faults: $page_faults"
                    else
                        echo "Page faults: Unable to measure"
                    fi
                fi

                if [ $METRIC_CONTEXT_SWITCHES -eq 1 ]; then
                    local vol_cs invol_cs
                    vol_cs=$(echo "$time_output" | grep "voluntary context switches" | grep -v "involuntary" | awk '{print $1}')
                    invol_cs=$(echo "$time_output" | grep "involuntary context switches" | awk '{print $1}')
                    if [ -n "$vol_cs" ]; then
                        CONTEXT_SWITCH_RESULTS+=("$vol_cs")
                        echo "Voluntary context switches: $vol_cs"
                        echo "Involuntary context switches: $invol_cs"
                    else
                        echo "Context switches: Unable to measure"
                    fi
                fi
            fi
            ;;
        Linux)
            if [ $METRIC_TIME -eq 1 ]; then
                local start_time end_time real_time
                start_time=$(date +%s.%N)
                eval "$(build_check_command)" >/dev/null 2>&1 || true
                end_time=$(date +%s.%N)
                real_time=$(echo "scale=2; $end_time - $start_time" | bc -l)
                TIME_RESULTS+=("$real_time")
                echo "Execution time: ${real_time}s"
            fi

            if [ $METRIC_MEMORY -eq 1 ] || [ $METRIC_PAGE_FAULTS -eq 1 ] || [ $METRIC_CONTEXT_SWITCHES -eq 1 ]; then
                local time_stderr cmd_output
                time_stderr=$(mktemp)
                cmd_output=$(mktemp)

                # GNU time's verbose report goes to stderr.
                eval "/usr/bin/time -v $(build_check_command)" >"$cmd_output" 2>"$time_stderr" || true

                local time_output
                time_output=$(cat "$time_stderr")
                rm -f "$time_stderr" "$cmd_output"

                if [ $METRIC_MEMORY -eq 1 ]; then
                    local memory_kb
                    memory_kb=$(echo "$time_output" | grep "Maximum resident set size" | awk '{print $NF}')
                    if [ -n "$memory_kb" ]; then
                        local memory_mb=$((memory_kb / 1024))
                        MEMORY_RESULTS+=("$memory_mb")
                        echo "Peak memory: ${memory_mb} MB (${memory_kb} KB)"
                    else
                        echo "Peak memory: Unable to measure"
                    fi
                fi

                if [ $METRIC_PAGE_FAULTS -eq 1 ]; then
                    local page_faults
                    page_faults=$(echo "$time_output" | grep "Major (requiring I/O) page faults" | awk '{print $NF}')
                    if [ -n "$page_faults" ]; then
                        PAGE_FAULT_RESULTS+=("$page_faults")
                        echo "Page faults: $page_faults"
                    else
                        echo "Page faults: Unable to measure"
                    fi
                fi

                if [ $METRIC_CONTEXT_SWITCHES -eq 1 ]; then
                    local vol_cs invol_cs
                    vol_cs=$(echo "$time_output" | grep "Voluntary context switches" | awk '{print $NF}')
                    invol_cs=$(echo "$time_output" | grep "Involuntary context switches" | awk '{print $NF}')
                    if [ -n "$vol_cs" ]; then
                        CONTEXT_SWITCH_RESULTS+=("$vol_cs")
                        echo "Voluntary context switches: $vol_cs"
                        echo "Involuntary context switches: $invol_cs"
                    else
                        echo "Context switches: Unable to measure"
                    fi
                fi
            fi
            ;;
    esac
}

# Static (no-execution) metrics on the built binary.
measure_static_analysis() {
    if [ $METRIC_STATIC -eq 0 ]; then return; fi

    echo "Static analysis metrics:"

    # NOTE(review): this is an md5 fingerprint of the byte count, not a real
    # entropy measure — kept for output compatibility.
    # FIX: fall back to md5(1) so the line is not empty on macOS, where
    # md5sum is usually absent.
    local size_bytes
    size_bytes=$(stat -f%z "$BINARY_PATH" 2>/dev/null || stat -c%s "$BINARY_PATH" 2>/dev/null)
    echo " Binary entropy: $(echo "$size_bytes" | { md5sum 2>/dev/null || md5 2>/dev/null; } | cut -c1-8)"

    # Dynamic-library dependency count (ldd on Linux, otool on macOS).
    local deps
    if command -v ldd >/dev/null 2>&1; then
        deps=$(ldd "$BINARY_PATH" 2>/dev/null | wc -l)
        echo " Dynamic dependencies: $deps"
    elif command -v otool >/dev/null 2>&1; then
        deps=$(otool -L "$BINARY_PATH" 2>/dev/null | tail -n +2 | wc -l)
        echo " Dynamic dependencies: $deps"
    fi
}
# Print platform-appropriate CPU-profiling instructions. No profiling is
# actually performed here; this metric is purely advisory.
profile_cpu() {
    if [ $METRIC_PROFILE_CPU -eq 0 ]; then
        return
    fi

    echo "CPU profiling recommendations:"

    if [ "$OS" = "Darwin" ]; then
        if [ $HAS_INSTRUMENTS -eq 1 ]; then
            echo " Run: instruments -t 'CPU Profiler' $BINARY_PATH check"
        else
            echo " Install Xcode command line tools for 'instruments'"
        fi
    elif [ "$OS" = "Linux" ]; then
        if [ $HAS_PERF -eq 1 ]; then
            echo " Run: perf record -g $BINARY_PATH check && perf report"
        else
            echo " Install 'perf' for CPU profiling"
        fi
    fi
}
# Execute one full measurement pass (run $1 of $RUNS). Every measure_* and
# profile_* helper is invoked; each helper independently decides whether
# its metric is enabled, so this stays a plain dispatch list.
run_single_test() {
    local current_run=$1
    local step

    if [ "$OUTPUT_FORMAT" = "stdio" ]; then
        echo -e "${BLUE}Run $current_run/$RUNS${NC}"
        echo "--------"
    fi

    for step in measure_size measure_symbols measure_strip \
                measure_time_and_resources measure_static_analysis \
                profile_memory profile_cpu; do
        "$step"
    done

    if [ "$OUTPUT_FORMAT" = "stdio" ]; then
        echo ""
    fi
}
-z "$AVG_SIZE" ] && echo "Average binary size: $(ls -lh $BINARY_PATH | awk '{print $5}') ($AVG_SIZE bytes)" - [ ! -z "$AVG_TIME" ] && echo "Average execution time: ${AVG_TIME}s" - [ ! -z "$AVG_MEMORY" ] && echo "Average peak memory: ${AVG_MEMORY} MB" - [ ! -z "$AVG_SYMBOLS" ] && echo "Average symbol count: $AVG_SYMBOLS" - echo "" - fi - - echo -e "${GREEN}✓ Performance testing completed${NC}" -} - -output_to_markdown() { - echo "# RustOwl Performance Test Results" > "$OUTPUT_FILE" - echo "" >> "$OUTPUT_FILE" - echo "**Date:** $(date)" >> "$OUTPUT_FILE" - echo "**OS:** $OS" >> "$OUTPUT_FILE" - echo "**Rust Version:** $RUST_VERSION" >> "$OUTPUT_FILE" - echo "**Runs:** $RUNS" >> "$OUTPUT_FILE" - echo "**Cache Mode:** $([ $COLD_CACHE -eq 1 ] && echo "Cold" || echo "Warm")" >> "$OUTPUT_FILE" - echo "**Build Command:** \`RUSTC_BOOTSTRAP=1 rustup run 1.87.0 cargo build --release\`" >> "$OUTPUT_FILE" - echo "**Test Command:** \`$(build_check_command)\`" >> "$OUTPUT_FILE" - echo "" >> "$OUTPUT_FILE" - - # Summary table - echo "## Performance Summary" >> "$OUTPUT_FILE" - echo "" >> "$OUTPUT_FILE" - echo "| Metric | Value | Unit |" >> "$OUTPUT_FILE" - echo "|--------|-------|------|" >> "$OUTPUT_FILE" - - if [ ! -z "$AVG_SIZE" ]; then - local size_human=$(ls -lh "$BINARY_PATH" | awk '{print $5}') - echo "| Binary Size | $size_human | ($AVG_SIZE bytes) |" >> "$OUTPUT_FILE" - fi - - if [ ! -z "$AVG_TIME" ]; then - echo "| Average Execution Time | ${AVG_TIME}s | seconds |" >> "$OUTPUT_FILE" - fi - - if [ ! -z "$AVG_MEMORY" ]; then - echo "| Average Peak Memory | ${AVG_MEMORY} MB | megabytes |" >> "$OUTPUT_FILE" - fi - - if [ ! -z "$AVG_PAGE_FAULTS" ]; then - echo "| Average Page Faults | $AVG_PAGE_FAULTS | count |" >> "$OUTPUT_FILE" - fi - - if [ ! -z "$AVG_CONTEXT_SWITCHES" ]; then - echo "| Average Context Switches | $AVG_CONTEXT_SWITCHES | count |" >> "$OUTPUT_FILE" - fi - - if [ ! 
-z "$AVG_SYMBOLS" ]; then - echo "| Average Symbol Count | $AVG_SYMBOLS | symbols |" >> "$OUTPUT_FILE" - fi - - if [ ! -z "$STRIPPED_SIZE" ]; then - local stripped_human=$(echo "$STRIPPED_SIZE" | awk '{printf "%.1fM", $1/1024/1024}') - local reduction=$((AVG_SIZE - STRIPPED_SIZE)) - local reduction_percent=$(echo "scale=1; ($reduction * 100.0) / $AVG_SIZE" | bc -l) - echo "| Stripped Binary Size | $stripped_human | ($STRIPPED_SIZE bytes, -$reduction_percent%) |" >> "$OUTPUT_FILE" - fi - echo "" >> "$OUTPUT_FILE" - - # Individual run data if multiple runs - if [ $RUNS -gt 1 ]; then - echo "## Individual Run Results" >> "$OUTPUT_FILE" - echo "" >> "$OUTPUT_FILE" - - if [ ${#TIME_RESULTS[@]} -gt 0 ]; then - echo "### Execution Times" >> "$OUTPUT_FILE" - echo "" >> "$OUTPUT_FILE" - echo "| Run | Time (seconds) |" >> "$OUTPUT_FILE" - echo "|-----|----------------|" >> "$OUTPUT_FILE" - for i in "${!TIME_RESULTS[@]}"; do - echo "| $((i+1)) | ${TIME_RESULTS[$i]}s |" >> "$OUTPUT_FILE" - done - echo "" >> "$OUTPUT_FILE" - fi - - if [ ${#MEMORY_RESULTS[@]} -gt 0 ]; then - echo "### Peak Memory Usage" >> "$OUTPUT_FILE" - echo "" >> "$OUTPUT_FILE" - echo "| Run | Memory (MB) |" >> "$OUTPUT_FILE" - echo "|-----|-------------|" >> "$OUTPUT_FILE" - for i in "${!MEMORY_RESULTS[@]}"; do - echo "| $((i+1)) | ${MEMORY_RESULTS[$i]} MB |" >> "$OUTPUT_FILE" - done - echo "" >> "$OUTPUT_FILE" - fi - - if [ ${#PAGE_FAULT_RESULTS[@]} -gt 0 ]; then - echo "### Page Faults" >> "$OUTPUT_FILE" - echo "" >> "$OUTPUT_FILE" - echo "| Run | Page Faults |" >> "$OUTPUT_FILE" - echo "|-----|-------------|" >> "$OUTPUT_FILE" - for i in "${!PAGE_FAULT_RESULTS[@]}"; do - echo "| $((i+1)) | ${PAGE_FAULT_RESULTS[$i]} |" >> "$OUTPUT_FILE" - done - echo "" >> "$OUTPUT_FILE" - fi - - if [ ${#CONTEXT_SWITCH_RESULTS[@]} -gt 0 ]; then - echo "### Context Switches" >> "$OUTPUT_FILE" - echo "" >> "$OUTPUT_FILE" - echo "| Run | Context Switches |" >> "$OUTPUT_FILE" - echo "|-----|------------------|" >> 
"$OUTPUT_FILE" - for i in "${!CONTEXT_SWITCH_RESULTS[@]}"; do - echo "| $((i+1)) | ${CONTEXT_SWITCH_RESULTS[$i]} |" >> "$OUTPUT_FILE" - done - echo "" >> "$OUTPUT_FILE" - fi - fi - - # System information - echo "## System Information" >> "$OUTPUT_FILE" - echo "" >> "$OUTPUT_FILE" - echo "- **Architecture:** $(uname -m)" >> "$OUTPUT_FILE" - echo "- **Kernel:** $(uname -r)" >> "$OUTPUT_FILE" - if command -v sysctl >/dev/null 2>&1; then - echo "- **CPU:** $(sysctl -n machdep.cpu.brand_string 2>/dev/null || echo "Unknown")" >> "$OUTPUT_FILE" - echo "- **CPU Cores:** $(sysctl -n hw.ncpu 2>/dev/null || echo "Unknown")" >> "$OUTPUT_FILE" - echo "- **Total Memory:** $(echo "scale=1; $(sysctl -n hw.memsize 2>/dev/null || echo 0) / 1024 / 1024 / 1024" | bc -l)GB" >> "$OUTPUT_FILE" - fi - echo "" >> "$OUTPUT_FILE" - - # Build information - echo "## Build Information" >> "$OUTPUT_FILE" - echo "" >> "$OUTPUT_FILE" - echo "- **Target:** $(rustc --version --verbose | grep "host:" | cut -d' ' -f2)" >> "$OUTPUT_FILE" - echo "- **Profile:** release" >> "$OUTPUT_FILE" - echo "- **Optimization:** \`opt-level = 3\`" >> "$OUTPUT_FILE" - echo "" >> "$OUTPUT_FILE" - - echo "---" >> "$OUTPUT_FILE" - echo "*Generated by RustOwl performance testing script v1.0*" >> "$OUTPUT_FILE" - - echo -e "${GREEN}✓ Comprehensive results saved to $OUTPUT_FILE${NC}" -} - -compare_with_file() { - echo -e "${BLUE}Comparing with $COMPARE_FILE${NC}" - echo "=================================" - - if [ ! 
-f "$COMPARE_FILE" ]; then - echo -e "${RED}✗ Comparison file not found: $COMPARE_FILE${NC}" - return - fi - - # Extract previous results - local prev_size=$(grep "Binary Size:" "$COMPARE_FILE" 2>/dev/null | sed 's/.*(\([0-9]*\) bytes).*/\1/') - local prev_memory=$(grep "Average Peak Memory:" "$COMPARE_FILE" 2>/dev/null | sed 's/.*Memory:\** \([0-9]*\) MB.*/\1/') - local prev_symbols=$(grep "Average Symbol Count:" "$COMPARE_FILE" 2>/dev/null | sed 's/.*Count:\** \([0-9]*\).*/\1/') - - echo "Comparison Results:" - echo "===================" - - if [ ! -z "$AVG_SIZE" ] && [ ! -z "$prev_size" ]; then - local size_diff=$((AVG_SIZE - prev_size)) - local size_percent=$(echo "scale=1; ($size_diff * 100.0) / $prev_size" | bc -l) - local size_human=$(ls -lh "$BINARY_PATH" | awk '{print $5}') - - if [ $size_diff -gt 0 ]; then - echo -e "Binary Size: $size_human (${RED}+$size_diff bytes, +$size_percent%${NC})" - elif [ $size_diff -lt 0 ]; then - echo -e "Binary Size: $size_human (${GREEN}$size_diff bytes, $size_percent%${NC})" - else - echo "Binary Size: $size_human (no change)" - fi - fi - - if [ ! -z "$AVG_MEMORY" ] && [ ! -z "$prev_memory" ]; then - local mem_diff=$((AVG_MEMORY - prev_memory)) - local mem_percent=$(echo "scale=1; ($mem_diff * 100.0) / $prev_memory" | bc -l) - - if [ $mem_diff -gt 0 ]; then - echo -e "Peak Memory: ${AVG_MEMORY} MB (${RED}+$mem_diff MB, +$mem_percent%${NC})" - elif [ $mem_diff -lt 0 ]; then - echo -e "Peak Memory: ${AVG_MEMORY} MB (${GREEN}$mem_diff MB, $mem_percent%${NC})" - else - echo "Peak Memory: ${AVG_MEMORY} MB (no change)" - fi - fi - - if [ ! -z "$AVG_SYMBOLS" ] && [ ! 
-z "$prev_symbols" ]; then - local sym_diff=$((AVG_SYMBOLS - prev_symbols)) - local sym_percent=$(echo "scale=1; ($sym_diff * 100.0) / $prev_symbols" | bc -l) - - if [ $sym_diff -gt 0 ]; then - echo -e "Symbol Count: $AVG_SYMBOLS (${RED}+$sym_diff symbols, +$sym_percent%${NC})" - elif [ $sym_diff -lt 0 ]; then - echo -e "Symbol Count: $AVG_SYMBOLS (${GREEN}$sym_diff symbols, $sym_percent%${NC})" - else - echo "Symbol Count: $AVG_SYMBOLS (no change)" - fi - fi - - echo "" -} - -cleanup() { - # No longer need to clean up TEST_BINARY since we use BINARY_PATH directly - true -} - -# Main execution -main() { - parse_args "$@" - print_header - check_required_tools - detect_tools - - case "$MODE" in - verify) - verify_system - ;; - prepare) - build_project - prepare_test_package - ;; - test) - build_project - prepare_test_package - run_performance_tests - - if [ "$OUTPUT_FORMAT" = "markdown" ]; then - output_to_markdown - fi - - if [ ! -z "$COMPARE_FILE" ]; then - compare_with_file - fi - ;; - *) - echo -e "${RED}Unknown mode: $MODE${NC}" - usage - exit 1 - ;; - esac - - cleanup -} - -# Trap cleanup on exit -trap cleanup EXIT - -# Run main function -main "$@" diff --git a/scripts/bench.sh b/scripts/bench.sh new file mode 100755 index 00000000..3a0a8724 --- /dev/null +++ b/scripts/bench.sh @@ -0,0 +1,648 @@ +#!/bin/bash +# Local performance benchmarking script for RustOwl +# This script provides an easy way to run Criterion benchmarks locally +# Local performance benchmarking script for development use + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +BOLD='\033[1m' +NC='\033[0m' # No Color + +# Configuration +BENCHMARK_NAME="rustowl_bench_simple" + +# Look for existing test packages in the repo +TEST_PACKAGES=( + "./tests/fixtures" + "./benches/fixtures" + "./test-data" + "./examples" + "./perf-tests" +) + +# Options +OPEN_REPORT=false +SAVE_BASELINE="" +LOAD_BASELINE="" +COMPARE_MODE=false +CLEAN_BUILD=false 
SHOW_OUTPUT=true
REGRESSION_THRESHOLD="5%"
TEST_PACKAGE_PATH=""

# Print CLI help.
# FIX: the <name>/<percent>/<path> metavariables had been lost from the
# option descriptions and the baselines path (leaving e.g. "--save " and
# "baselines/performance//"); restored so the help text is unambiguous.
usage() {
    echo "Usage: $0 [OPTIONS]"
    echo ""
    echo "Performance Benchmarking Script for RustOwl"
    echo "Runs Criterion benchmarks with comparison and regression detection capabilities"
    echo ""
    echo "Options:"
    echo "  -h, --help             Show this help message"
    echo "  --save <name>          Save benchmark results as baseline with given name"
    echo "  --load <name>          Load baseline and compare current results against it"
    echo "  --threshold <percent>  Set regression threshold (default: 5%)"
    echo "  --test-package <path>  Use specific test package (auto-detected if not specified)"
    echo "  --open                 Open HTML report in browser after benchmarking"
    echo "  --clean                Clean build artifacts before benchmarking"
    echo "  --quiet                Minimal output (for CI/automated use)"
    echo ""
    echo "Examples:"
    echo "  $0                              # Run benchmarks with default settings"
    echo "  $0 --save main                  # Save results as 'main' baseline"
    echo "  $0 --load main --threshold 3%   # Compare against 'main' with 3% threshold"
    echo "  $0 --clean --open               # Clean build, run benchmarks, open report"
    echo "  $0 --save current --quiet       # Save baseline quietly (for CI)"
    echo ""
    echo "Baseline Management:"
    echo "  Baselines are stored in: baselines/performance/<name>/"
    echo "  HTML reports are in:     target/criterion/report/"
    echo ""
}

# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        -h|--help)
            usage
            exit 0
            ;;
        --save)
            if [[ -z "$2" ]]; then
                echo -e "${RED}Error: --save requires a baseline name${NC}"
                echo "Example: $0 --save main"
                exit 1
            fi
            SAVE_BASELINE="$2"
            shift 2
            ;;
        --load)
            if [[ -z "$2" ]]; then
                echo -e "${RED}Error: --load requires a baseline name${NC}"
                echo "Example: $0 --load main"
                exit 1
            fi
            LOAD_BASELINE="$2"
            COMPARE_MODE=true
            shift 2
            ;;
        --threshold)
            if [[ -z "$2" ]]; then
                echo -e "${RED}Error: --threshold requires a percentage${NC}"
                echo "Example: $0 --threshold 3%"
                exit 1
            fi
            REGRESSION_THRESHOLD="$2"
            shift 2
            ;;
        --test-package)
            if [[ -z "$2" ]]; then
                echo -e "${RED}Error: --test-package requires a path${NC}"
                echo "Example: $0 --test-package ./examples/sample"
                exit 1
            fi
            TEST_PACKAGE_PATH="$2"
            shift 2
            ;;
        --open)
            OPEN_REPORT=true
            shift
            ;;
        --clean)
            CLEAN_BUILD=true
            shift
            ;;
        --quiet)
            SHOW_OUTPUT=false
            shift
            ;;
        baseline)
            # Legacy support for CI workflow
            SAVE_BASELINE="main"
            SHOW_OUTPUT=false
            shift
            ;;
        compare)
            # Legacy support for CI workflow
            COMPARE_MODE=true
            LOAD_BASELINE="main"
            shift
            ;;
        *)
            echo -e "${RED}Unknown option: $1${NC}"
            echo "Use --help for usage information"
            exit 1
            ;;
    esac
done

# Banner describing the selected run mode (suppressed by --quiet).
print_header() {
    if [[ "$SHOW_OUTPUT" == "true" ]]; then
        echo -e "${BLUE}${BOLD}=====================================${NC}"
        echo -e "${BLUE}${BOLD}  RustOwl Performance Benchmarks${NC}"
        echo -e "${BLUE}${BOLD}=====================================${NC}"
        echo ""

        if [[ -n "$SAVE_BASELINE" ]]; then
            echo -e "${GREEN}Mode: Save baseline as '$SAVE_BASELINE'${NC}"
        elif [[ "$COMPARE_MODE" == "true" ]]; then
            echo -e "${GREEN}Mode: Compare against '$LOAD_BASELINE' baseline${NC}"
            echo -e "${GREEN}Regression threshold: $REGRESSION_THRESHOLD${NC}"
        else
            echo -e "${GREEN}Mode: Standard benchmark run${NC}"
        fi
        echo ""
    fi
}

# Resolve TEST_PACKAGE_PATH: honor an explicit --test-package, otherwise
# probe the TEST_PACKAGES candidates, then ./benches, then fall back to
# the current project.
# FIX: `find ... | head -1 >/dev/null 2>&1` always succeeds — the
# pipeline's exit status is head's, which is 0 even on empty input — so
# every existing candidate directory matched regardless of its contents.
# Test the find output for non-emptiness instead; -print -quit stops the
# search at the first hit.
find_test_package() {
    if [[ -n "$TEST_PACKAGE_PATH" ]]; then
        if [[ -d "$TEST_PACKAGE_PATH" ]]; then
            if [[ "$SHOW_OUTPUT" == "true" ]]; then
                echo -e "${GREEN}✓ Using specified test package: $TEST_PACKAGE_PATH${NC}"
            fi
            return 0
        else
            echo -e "${RED}Error: Specified test package not found: $TEST_PACKAGE_PATH${NC}"
            exit 1
        fi
    fi

    # Auto-detect existing test packages
    local test_dir first_manifest
    for test_dir in "${TEST_PACKAGES[@]}"; do
        [[ -d "$test_dir" ]] || continue

        # Check if it contains Rust code
        if [[ -n "$(find "$test_dir" -name '*.rs' -print -quit 2>/dev/null)" ]]; then
            TEST_PACKAGE_PATH="$test_dir"
            if [[ "$SHOW_OUTPUT" == "true" ]]; then
                echo -e "${GREEN}✓ Found test package: $TEST_PACKAGE_PATH${NC}"
            fi
            return 0
        fi

        # Check if it contains Cargo.toml files (subdirectories with packages)
        first_manifest=$(find "$test_dir" -name "Cargo.toml" -print -quit 2>/dev/null)
        if [[ -n "$first_manifest" ]]; then
            TEST_PACKAGE_PATH=$(dirname "$first_manifest")
            if [[ "$SHOW_OUTPUT" == "true" ]]; then
                echo -e "${GREEN}✓ Found test package: $TEST_PACKAGE_PATH${NC}"
            fi
            return 0
        fi
    done

    # Look for existing benchmark files
    if [[ -d "./benches" ]]; then
        TEST_PACKAGE_PATH="./benches"
        if [[ "$SHOW_OUTPUT" == "true" ]]; then
            echo -e "${GREEN}✓ Using benchmark directory: $TEST_PACKAGE_PATH${NC}"
        fi
        return 0
    fi

    # Use the current project as test package
    if [[ -f "./Cargo.toml" ]]; then
        TEST_PACKAGE_PATH="."
        if [[ "$SHOW_OUTPUT" == "true" ]]; then
            echo -e "${GREEN}✓ Using current project as test package${NC}"
        fi
        return 0
    fi

    echo -e "${RED}Error: No suitable test package found in the repository${NC}"
    echo -e "${YELLOW}Searched in: ${TEST_PACKAGES[*]}${NC}"
    echo -e "${YELLOW}Use --test-package to specify a custom location${NC}"
    exit 1
}

# Verify toolchain availability and resolve the test package; exits on a
# missing Rust installation.
check_prerequisites() {
    if [[ "$SHOW_OUTPUT" == "true" ]]; then
        echo -e "${YELLOW}Checking prerequisites...${NC}"
    fi

    # Check Rust installation (any version is fine - we trust rust-toolchain.toml)
    if ! command -v rustc >/dev/null 2>&1; then
        echo -e "${RED}Error: Rust is not installed${NC}"
        echo -e "${YELLOW}Please install Rust: https://rustup.rs/${NC}"
        exit 1
    fi

    # Show current Rust version
    local rust_version
    rust_version=$(rustc --version)
    if [[ "$SHOW_OUTPUT" == "true" ]]; then
        echo -e "${GREEN}✓ Rust: $rust_version${NC}"
        echo -e "${GREEN}✓ Cargo: $(cargo --version)${NC}"
        echo -e "${GREEN}✓ Host: $(rustc -vV | grep host | cut -d' ' -f2)${NC}"
    fi

    # cargo-criterion is optional; cargo bench is the fallback.
    if command -v cargo-criterion >/dev/null 2>&1; then
        if [[ "$SHOW_OUTPUT" == "true" ]]; then
            echo -e "${GREEN}✓ cargo-criterion is available${NC}"
        fi
    else
        if [[ "$SHOW_OUTPUT" == "true" ]]; then
            echo -e "${YELLOW}! cargo-criterion not found, using cargo bench${NC}"
        fi
    fi

    # Find and validate test package
    find_test_package

    if [[ "$SHOW_OUTPUT" == "true" ]]; then
        echo ""
    fi
}

# Run `cargo clean` when --clean was requested; no-op otherwise.
clean_build() {
    if [[ "$CLEAN_BUILD" == "true" ]]; then
        if [[ "$SHOW_OUTPUT" == "true" ]]; then
            echo -e "${YELLOW}Cleaning build artifacts...${NC}"
        fi
        cargo clean
        if [[ "$SHOW_OUTPUT" == "true" ]]; then
            echo -e "${GREEN}✓ Build artifacts cleaned${NC}"
            echo ""
        fi
    fi
}

# Build the release binary. RUSTC_BOOTSTRAP=1 is required because the
# build uses unstable compiler interfaces on a stable toolchain.
build_rustowl() {
    if [[ "$SHOW_OUTPUT" == "true" ]]; then
        echo -e "${YELLOW}Building RustOwl in release mode...${NC}"
    fi

    if [[ "$SHOW_OUTPUT" == "true" ]]; then
        RUSTC_BOOTSTRAP=1 cargo build --release
    else
        RUSTC_BOOTSTRAP=1 cargo build --release --quiet
    fi

    if [[ "$SHOW_OUTPUT" == "true" ]]; then
        echo -e "${GREEN}✓ Build completed${NC}"
        echo ""
    fi
}

# Run Criterion benchmarks (when benchmark sources exist) plus a wall-clock
# analysis benchmark of the release binary against the detected test
# package. Baseline timings are persisted under
# baselines/performance/<name>/ when --save is active.
run_benchmarks() {
    if [[ "$SHOW_OUTPUT" == "true" ]]; then
        echo -e "${YELLOW}Running performance benchmarks...${NC}"
    fi

    # FIX: same always-true `find | head -1 >/dev/null` pattern as in
    # find_test_package — the Criterion step previously ran even when
    # ./benches contained no .rs files.
    if [[ -d "./benches" && -n "$(find ./benches -name '*.rs' -print -quit 2>/dev/null)" ]]; then
        # Prepare benchmark command
        local bench_cmd="cargo bench"
        local bench_args=""

        # Use cargo-criterion if available and not doing baseline operations
        if command -v cargo-criterion >/dev/null 2>&1 && [[ -z "$SAVE_BASELINE" && "$COMPARE_MODE" != "true" ]]; then
            bench_cmd="cargo criterion"
        fi

        # Add baseline arguments if saving
        if [[ -n "$SAVE_BASELINE" ]]; then
            bench_args="$bench_args --bench rustowl_bench_simple -- --save-baseline $SAVE_BASELINE"
        fi

        # Add baseline arguments if comparing
        if [[ "$COMPARE_MODE" == "true" && -n "$LOAD_BASELINE" ]]; then
            bench_args="$bench_args --bench rustowl_bench_simple -- --baseline $LOAD_BASELINE"
        fi

        # If no baseline operations, run all benchmarks
        if [[ -z "$SAVE_BASELINE" && "$COMPARE_MODE" != "true" ]]; then
            bench_args="$bench_args --bench rustowl_bench_simple"
        fi

        # Run the benchmarks
        if [[ "$SHOW_OUTPUT" == "true" ]]; then
            echo -e "${BLUE}Running: $bench_cmd $bench_args${NC}"
            $bench_cmd $bench_args
        else
            # NOTE(review): when bench_args already ends with "-- <criterion
            # args>", the trailing --quiet lands after the "--" and is passed
            # to the bench binary rather than cargo — confirm intended.
            $bench_cmd $bench_args --quiet 2>/dev/null || $bench_cmd $bench_args >/dev/null 2>&1
        fi
    else
        if [[ "$SHOW_OUTPUT" == "true" ]]; then
            echo -e "${YELLOW}! No benchmark files found in ./benches, skipping Criterion benchmarks${NC}"
        fi
    fi

    # Run specific RustOwl analysis benchmarks using real test data
    if [[ -f "./target/release/rustowl" || -f "./target/release/rustowl.exe" ]]; then
        local rustowl_binary="./target/release/rustowl"
        if [[ -f "./target/release/rustowl.exe" ]]; then
            rustowl_binary="./target/release/rustowl.exe"
        fi

        if [[ "$SHOW_OUTPUT" == "true" ]]; then
            echo -e "${YELLOW}Running RustOwl analysis benchmark on: $TEST_PACKAGE_PATH${NC}"
        fi

        # Time the analysis (date +%N is unsupported on BSD date; fall back
        # to whole-second resolution there).
        local start_time end_time
        start_time=$(date +%s.%N 2>/dev/null || date +%s)

        # NOTE(review): timeout(1) is not in macOS's base system — confirm
        # coreutils availability on mac runners.
        if [[ "$SHOW_OUTPUT" == "true" ]]; then
            timeout 120 "$rustowl_binary" check "$TEST_PACKAGE_PATH" 2>/dev/null || true
        else
            timeout 120 "$rustowl_binary" check "$TEST_PACKAGE_PATH" >/dev/null 2>&1 || true
        fi

        end_time=$(date +%s.%N 2>/dev/null || date +%s)

        # Calculate duration (handle both nanosecond and second precision)
        local duration
        if command -v bc >/dev/null 2>&1 && [[ "$start_time" == *.* ]]; then
            duration=$(echo "$end_time - $start_time" | bc -l 2>/dev/null || echo "N/A")
        else
            duration=$((end_time - start_time))
        fi

        if [[ "$SHOW_OUTPUT" == "true" ]]; then
            echo -e "${GREEN}✓ Analysis completed in ${duration}s${NC}"
        fi

        # Save timing info for comparison
        if [[ -n "$SAVE_BASELINE" ]]; then
            mkdir -p "baselines/performance/$SAVE_BASELINE"
            echo "$duration" > "baselines/performance/$SAVE_BASELINE/analysis_time.txt"
            echo "$TEST_PACKAGE_PATH" > "baselines/performance/$SAVE_BASELINE/test_package.txt"
        fi

        # Compare timing if in compare mode
        if [[ "$COMPARE_MODE" == "true" && -f "baselines/performance/$LOAD_BASELINE/analysis_time.txt" ]]; then
            local baseline_time
            baseline_time=$(cat "baselines/performance/$LOAD_BASELINE/analysis_time.txt")
            compare_analysis_times "$baseline_time" "$duration"
        fi
    else
        if [[ "$SHOW_OUTPUT" == "true" ]]; then
            echo -e "${YELLOW}! RustOwl binary not found, skipping analysis benchmark${NC}"
        fi
    fi

    if [[ "$SHOW_OUTPUT" == "true" ]]; then
        echo -e "${GREEN}✓ Benchmarks completed${NC}"
        echo ""
    fi
}
(+${abs_change}% > ${REGRESSION_THRESHOLD})${NC}" + fi + return 1 + else + if [[ "$SHOW_OUTPUT" == "true" ]]; then + echo -e "${GREEN}✓ Performance improvement detected! (-${abs_change}%)${NC}" + fi + fi + else + if [[ "$SHOW_OUTPUT" == "true" ]]; then + echo -e "${GREEN}✓ Performance within acceptable range (±${abs_change}% ≤ ${REGRESSION_THRESHOLD})${NC}" + fi + fi + + return 0 +} + +# Analyze benchmark output for regressions +analyze_regressions() { + if [[ "$COMPARE_MODE" != "true" ]]; then + return 0 + fi + + if [[ "$SHOW_OUTPUT" == "true" ]]; then + echo -e "${YELLOW}Analyzing benchmark results for regressions...${NC}" + fi + + # Look for Criterion output files + local criterion_dir="target/criterion" + local regression_found=false + + if [[ -d "$criterion_dir" ]]; then + # Check for regression indicators in Criterion reports (simplified) + if find "$criterion_dir" -name "*.html" -print0 2>/dev/null | xargs -0 grep -l "regressed\|slower" 2>/dev/null | head -1 >/dev/null; then + regression_found=true + fi + + # Create a comprehensive summary file for CI + if [[ -f "$criterion_dir/report/index.html" ]]; then + cat > benchmark-summary.txt << EOF +# RustOwl Benchmark Summary +Generated: $(date) +Test Package: $TEST_PACKAGE_PATH +Mode: $(if [[ -n "$SAVE_BASELINE" ]]; then echo "Save baseline ($SAVE_BASELINE)"; elif [[ "$COMPARE_MODE" == "true" ]]; then echo "Compare against $LOAD_BASELINE"; else echo "Standard run"; fi) + +## Reports Available +- HTML Report: target/criterion/report/index.html +$(find "$criterion_dir" -name "index.html" | grep -v "^target/criterion/report/index.html$" | sed 's/^/- Individual: /' || true) + +## Benchmark Results Summary +EOF + + # Extract key timing information from JSON files + if command -v jq >/dev/null 2>&1; then + echo "### Detailed Timings (JSON extracted)" >> benchmark-summary.txt + find "$criterion_dir" -name "estimates.json" -exec sh -c 'echo "$(dirname "$1" | sed "s|target/criterion/||"): $(jq -r ".mean.point_estimate" 
"$1" 2>/dev/null || echo "N/A")s"' _ {} \; >> benchmark-summary.txt 2>/dev/null || true + else + echo "### Quick Summary (grep extracted)" >> benchmark-summary.txt + find "$criterion_dir" -name "*.json" -exec grep -h "\"mean\"" {} \; 2>/dev/null | head -10 >> benchmark-summary.txt || true + fi + + # Add regression status if comparing + if [[ "$COMPARE_MODE" == "true" ]]; then + echo "" >> benchmark-summary.txt + echo "## Regression Analysis" >> benchmark-summary.txt + if [[ "$regression_found" == "true" ]]; then + echo "⚠️ REGRESSION DETECTED" >> benchmark-summary.txt + else + echo "✅ No significant regressions" >> benchmark-summary.txt + fi + echo "Threshold: $REGRESSION_THRESHOLD" >> benchmark-summary.txt + fi + fi + fi + + if [[ "$regression_found" == "true" ]]; then + if [[ "$SHOW_OUTPUT" == "true" ]]; then + echo -e "${RED}⚠ Performance regressions detected in detailed analysis${NC}" + echo -e "${YELLOW}Check the HTML report for details: target/criterion/report/index.html${NC}" + fi + return 1 + else + if [[ "$SHOW_OUTPUT" == "true" ]]; then + echo -e "${GREEN}✓ No significant regressions detected${NC}" + fi + return 0 + fi +} + +open_report() { + if [[ "$OPEN_REPORT" == "true" && -f "target/criterion/report/index.html" ]]; then + if [[ "$SHOW_OUTPUT" == "true" ]]; then + echo -e "${YELLOW}Opening benchmark report...${NC}" + fi + + # Try to open the report in the default browser + if command -v xdg-open >/dev/null 2>&1; then + xdg-open "target/criterion/report/index.html" 2>/dev/null & + elif command -v open >/dev/null 2>&1; then + open "target/criterion/report/index.html" 2>/dev/null & + elif command -v start >/dev/null 2>&1; then + start "target/criterion/report/index.html" 2>/dev/null & + else + if [[ "$SHOW_OUTPUT" == "true" ]]; then + echo -e "${YELLOW}Could not auto-open report. 
Please open: target/criterion/report/index.html${NC}" + fi + fi + fi +} + +show_results_location() { + if [[ "$SHOW_OUTPUT" == "true" ]]; then + echo -e "${BLUE}${BOLD}Results Location:${NC}" + + if [[ -f "target/criterion/report/index.html" ]]; then + echo -e "${GREEN}✓ HTML Report: target/criterion/report/index.html${NC}" + fi + + if [[ -n "$SAVE_BASELINE" && -d "baselines/performance/$SAVE_BASELINE" ]]; then + echo -e "${GREEN}✓ Saved baseline: baselines/performance/$SAVE_BASELINE/${NC}" + fi + + if [[ -f "benchmark-summary.txt" ]]; then + echo -e "${GREEN}✓ Summary: benchmark-summary.txt${NC}" + fi + + echo -e "${BLUE}✓ Test package used: $TEST_PACKAGE_PATH${NC}" + + echo "" + echo -e "${YELLOW}Tips:${NC}" + echo -e " • Use --open to automatically open the HTML report" + echo -e " • Use --save to create a baseline for future comparisons" + echo -e " • Use --load to compare against a saved baseline" + echo -e " • Use --test-package to benchmark specific test data" + echo "" + fi +} + +# Create a basic summary file even without detailed Criterion data +create_basic_summary() { + # Create a basic summary file even without detailed Criterion data + if [[ ! 
-f "benchmark-summary.txt" ]]; then + cat > benchmark-summary.txt << EOF +# RustOwl Benchmark Summary +Generated: $(date) +Test Package: $TEST_PACKAGE_PATH +Mode: $(if [[ -n "$SAVE_BASELINE" ]]; then echo "Save baseline ($SAVE_BASELINE)"; elif [[ "$COMPARE_MODE" == "true" ]]; then echo "Compare against $LOAD_BASELINE"; else echo "Standard run"; fi) + +## Analysis Performance +EOF + + # Add analysis timing if available + if [[ -n "$SAVE_BASELINE" && -f "baselines/performance/$SAVE_BASELINE/analysis_time.txt" ]]; then + local analysis_time=$(cat "baselines/performance/$SAVE_BASELINE/analysis_time.txt") + echo "Analysis Time: ${analysis_time}s" >> benchmark-summary.txt + fi + + # Add comparison info if available + if [[ "$COMPARE_MODE" == "true" && -f "baselines/performance/$LOAD_BASELINE/analysis_time.txt" ]]; then + local baseline_time=$(cat "baselines/performance/$LOAD_BASELINE/analysis_time.txt") + echo "Baseline Time: ${baseline_time}s" >> benchmark-summary.txt + echo "Threshold: $REGRESSION_THRESHOLD" >> benchmark-summary.txt + fi + + # Add build info + echo "" >> benchmark-summary.txt + echo "## Environment" >> benchmark-summary.txt + echo "Rust Version: $(rustc --version 2>/dev/null || echo 'Unknown')" >> benchmark-summary.txt + echo "Host: $(rustc -vV 2>/dev/null | grep host | cut -d' ' -f2 || echo 'Unknown')" >> benchmark-summary.txt + fi +} + +# Main execution +main() { + print_header + check_prerequisites + clean_build + build_rustowl + run_benchmarks + + # Check for regressions and set exit code + local exit_code=0 + if ! 
analyze_regressions; then
        exit_code=1
    fi

    # Make sure CI always has a summary file to pick up
    create_basic_summary

    open_report
    show_results_location

    if [[ "$SHOW_OUTPUT" == "true" ]]; then
        if [[ $exit_code -eq 0 ]]; then
            echo -e "${GREEN}${BOLD}✓ Benchmark completed successfully!${NC}"
        else
            echo -e "${RED}${BOLD}⚠ Benchmark completed with performance regressions detected${NC}"
        fi
    fi

    exit $exit_code
}

# Run main function
main "$@"
diff --git a/scripts/size-check.sh b/scripts/size-check.sh
new file mode 100755
index 00000000..51ab66dd
--- /dev/null
+++ b/scripts/size-check.sh
@@ -0,0 +1,345 @@
#!/usr/bin/env bash

set -euo pipefail

# RustOwl Binary Size Monitoring Script
# Tracks and validates binary size metrics

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

cd "$REPO_ROOT"

# Configuration
SIZE_BASELINE_FILE="baselines/size_baseline.txt"
SIZE_THRESHOLD_PCT=10 # Warn if binary size increases by more than 10%

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Usage text; a single heredoc instead of a run of echo statements.
show_help() {
    cat <<EOF
RustOwl Binary Size Monitoring

USAGE:
  $0 [OPTIONS] [COMMAND]

COMMANDS:
  check       Check current binary sizes (default)
  baseline    Create/update size baseline
  compare     Compare current sizes with baseline
  clean       Remove baseline file

OPTIONS:
  -h, --help       Show this help message
  -t, --threshold  Set size increase threshold (default: ${SIZE_THRESHOLD_PCT}%)
  -v, --verbose    Show verbose output

EXAMPLES:
  $0                  # Check current binary sizes
  $0 baseline         # Create baseline from current build
  $0 compare          # Compare with baseline
  $0 -t 15 compare    # Compare with 15% threshold
EOF
}

log_info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

log_success() {
    echo
-e "${GREEN}[SUCCESS]${NC} $1" +} + +log_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Get binary size in bytes +get_binary_size() { + local binary_path="$1" + if [ -f "$binary_path" ]; then + stat --format="%s" "$binary_path" 2>/dev/null || stat -f%z "$binary_path" 2>/dev/null || echo "0" + else + echo "0" + fi +} + +# Format size for human reading +format_size() { + local size="$1" + if command -v numfmt &> /dev/null; then + numfmt --to=iec-i --suffix=B "$size" + else + # Fallback formatting + if [ "$size" -ge 1048576 ]; then + echo "$(($size / 1048576))MB" + elif [ "$size" -ge 1024 ]; then + echo "$(($size / 1024))KB" + else + echo "${size}B" + fi + fi +} + +# Build binaries if they don't exist +ensure_binaries_built() { + local binaries=( + "target/release/rustowl" + "target/release/rustowlc" + ) + + local need_build=false + for binary in "${binaries[@]}"; do + if [ ! -f "$binary" ]; then + need_build=true + break + fi + done + + if $need_build; then + log_info "Building release binaries..." + if ! cargo build --release; then + log_error "Failed to build release binaries" + exit 1 + fi + fi +} + +# Check current binary sizes +check_sizes() { + log_info "Checking binary sizes..." + + ensure_binaries_built + + local binaries=( + "target/release/rustowl" + "target/release/rustowlc" + ) + + echo "" + printf "%-20s %10s %15s\n" "Binary" "Size" "Formatted" + printf "%-20s %10s %15s\n" "------" "----" "---------" + + for binary in "${binaries[@]}"; do + local size + size=$(get_binary_size "$binary") + local formatted + formatted=$(format_size "$size") + local name + name=$(basename "$binary") + + printf "%-20s %10d %15s\n" "$name" "$size" "$formatted" + done + echo "" +} + +# Create size baseline +create_baseline() { + log_info "Creating size baseline..." 
+ + ensure_binaries_built + + local binaries=( + "target/release/rustowl" + "target/release/rustowlc" + ) + + # Create target directory if it doesn't exist + mkdir -p "$(dirname "$SIZE_BASELINE_FILE")" + + # Write baseline + { + echo "# RustOwl Binary Size Baseline" + echo "# Generated on $(date)" + echo "# Format: binary_name:size_in_bytes" + for binary in "${binaries[@]}"; do + local size + size=$(get_binary_size "$binary") + local name + name=$(basename "$binary") + echo "$name:$size" + done + } > "$SIZE_BASELINE_FILE" + + log_success "Baseline created at $SIZE_BASELINE_FILE" + + # Show what was recorded + echo "" + log_info "Baseline contents:" + check_sizes +} + +# Compare with baseline +compare_with_baseline() { + if [ ! -f "$SIZE_BASELINE_FILE" ]; then + log_error "No baseline file found at $SIZE_BASELINE_FILE" + log_info "Run '$0 baseline' to create one" + exit 1 + fi + + log_info "Comparing with baseline (threshold: ${SIZE_THRESHOLD_PCT}%)..." + + ensure_binaries_built + + local binaries=( + "target/release/rustowl" + "target/release/rustowlc" + ) + + local any_issues=false + + echo "" + printf "%-20s %12s %12s %10s %8s\n" "Binary" "Baseline" "Current" "Diff" "Change" + printf "%-20s %12s %12s %10s %8s\n" "------" "--------" "-------" "----" "------" + + for binary in "${binaries[@]}"; do + local name + name=$(basename "$binary") + + # Get baseline size + local baseline_size + baseline_size=$(grep "^$name:" "$SIZE_BASELINE_FILE" | cut -d: -f2 || echo "0") + + if [ "$baseline_size" = "0" ]; then + log_warning "No baseline found for $name" + continue + fi + + # Get current size + local current_size + current_size=$(get_binary_size "$binary") + + if [ "$current_size" = "0" ]; then + log_error "Binary $name not found" + any_issues=true + continue + fi + + # Calculate difference + local diff=$((current_size - baseline_size)) + local pct_change=0 + + if [ "$baseline_size" -gt 0 ]; then + pct_change=$(echo "scale=1; $diff * 100 / $baseline_size" | bc 2>/dev/null 
|| echo "0") + fi + + # Format for display + local baseline_fmt current_fmt diff_fmt + baseline_fmt=$(format_size "$baseline_size") + current_fmt=$(format_size "$current_size") + + if [ "$diff" -gt 0 ]; then + diff_fmt="+$(format_size "$diff")" + elif [ "$diff" -lt 0 ]; then + diff_fmt="-$(format_size $((-diff)))" + else + diff_fmt="0B" + fi + + printf "%-20s %12s %12s %10s %7s%%\n" "$name" "$baseline_fmt" "$current_fmt" "$diff_fmt" "$pct_change" + + # Check threshold + local abs_pct_change + abs_pct_change=$(echo "$pct_change" | tr -d '-') + + if (( $(echo "$abs_pct_change > $SIZE_THRESHOLD_PCT" | bc -l) )); then + if [ "$diff" -gt 0 ]; then + log_warning "$name size increased by $pct_change% (threshold: ${SIZE_THRESHOLD_PCT}%)" + else + log_info "$name size decreased by $pct_change%" + fi + any_issues=true + fi + done + + echo "" + + if $any_issues; then + log_warning "Some binaries exceeded size thresholds" + exit 1 + else + log_success "All binary sizes within acceptable ranges" + fi +} + +# Clean baseline +clean_baseline() { + if [ -f "$SIZE_BASELINE_FILE" ]; then + rm "$SIZE_BASELINE_FILE" + log_success "Baseline file removed" + else + log_info "No baseline file to remove" + fi +} + +main() { + local command="check" + local verbose=false + + # Parse arguments + while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + show_help + exit 0 + ;; + -t|--threshold) + if [[ $# -lt 2 ]]; then + log_error "Option --threshold requires a value" + exit 1 + fi + SIZE_THRESHOLD_PCT="$2" + shift 2 + ;; + -v|--verbose) + verbose=true + shift + ;; + check|baseline|compare|clean) + command="$1" + shift + ;; + *) + log_error "Unknown option: $1" + show_help + exit 1 + ;; + esac + done + + # Ensure bc is available for calculations + if ! 
command -v bc &> /dev/null; then + log_error "bc (basic calculator) is required but not installed" + log_info "Install with: apt-get install bc" + exit 1 + fi + + case $command in + check) + check_sizes + ;; + baseline) + create_baseline + ;; + compare) + compare_with_baseline + ;; + clean) + clean_baseline + ;; + *) + log_error "Unknown command: $command" + show_help + exit 1 + ;; + esac +} + +main "$@"