-
-
Notifications
You must be signed in to change notification settings - Fork 79
test(benches): add basic benchmark infrastructure #602
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,34 @@ | ||
| on: | ||
| push: | ||
| branches: main | ||
|
|
||
| jobs: | ||
| run_benchmarks: | ||
| name: Run PR Benchmarks | ||
| runs-on: ubuntu-latest | ||
| steps: | ||
| - uses: actions/checkout@v4 | ||
| - name: Install Valgrind | ||
| run: sudo apt update && sudo apt install -y valgrind | ||
| - uses: taiki-e/install-action@cargo-binstall | ||
| - name: Install gungraun-runner | ||
| run: | | ||
| version=$(cargo metadata --format-version=1 |\ | ||
| jq '.packages[] | select(.name == "gungraun").version' |\ | ||
| tr -d '"' | ||
| ) | ||
| cargo binstall --no-confirm gungraun-runner --version $version | ||
| - name: Run Benchmarks | ||
| run: cargo bench --manifest-path ./benches/Cargo.toml > benchmark_results.txt | ||
| - name: Track Benchmarks with Bencher | ||
| run: | | ||
| bencher run \ | ||
| --host 'https://bencher.php.rs/api' \ | ||
| --project ext-php-rs \ | ||
| --token '${{ secrets.BENCHER_API_TOKEN }}' \ | ||
| --branch master \ | ||
| --testbed "$(php --version | head -n 1)" \ | ||
| --err \ | ||
| --adapter rust_gungraun \ | ||
| --github-actions '${{ secrets.GITHUB_TOKEN }}' \ | ||
| --file "./benchmark_results.txt" | ||
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
| @@ -0,0 +1,18 @@ | ||||||
| on: | ||||||
| pull_request_target: | ||||||
| types: [closed] | ||||||
|
|
||||||
| jobs: | ||||||
| archive_fork_pr_branch: | ||||||
| name: Archive closed PR branch with Bencher | ||||||
| runs-on: ubuntu-latest | ||||||
| steps: | ||||||
| - uses: actions/checkout@v4 | ||||||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
Suggested change
|
||||||
| - uses: bencherdev/bencher@main | ||||||
| - name: Archive closed fork PR branch with Bencher | ||||||
| run: | | ||||||
| bencher archive \ | ||||||
| --host 'https://bencher.php.rs/api' \ | ||||||
| --project ext-php-rs \ | ||||||
| --token '${{ secrets.BENCHER_API_TOKEN }}' \ | ||||||
| --branch "$GITHUB_HEAD_REF" | ||||||
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
| @@ -0,0 +1,34 @@ | ||||||
| name: Run Benchmarks | ||||||
|
|
||||||
| on: | ||||||
| pull_request: | ||||||
| types: [opened, reopened, edited, synchronize] | ||||||
|
|
||||||
| jobs: | ||||||
| run_benchmarks: | ||||||
| name: Run PR Benchmarks | ||||||
| runs-on: ubuntu-latest | ||||||
| steps: | ||||||
| - uses: actions/checkout@v4 | ||||||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
Suggested change
|
||||||
| - name: Install Valgrind | ||||||
| run: sudo apt update && sudo apt install -y valgrind | ||||||
| - uses: taiki-e/install-action@cargo-binstall | ||||||
| - name: Install gungraun-runner | ||||||
| run: | | ||||||
| version=$(cargo metadata --manifest-path ./benches/Cargo.toml --format-version=1 |\ | ||||||
| jq '.packages[] | select(.name == "gungraun").version' |\ | ||||||
| tr -d '"' | ||||||
| ) | ||||||
| cargo binstall --no-confirm gungraun-runner --version $version | ||||||
| - name: Run Benchmarks | ||||||
| run: cargo bench --manifest-path ./benches/Cargo.toml > benchmark_results.txt | ||||||
| - name: Upload Benchmark Results | ||||||
| uses: actions/upload-artifact@v4 | ||||||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
Suggested change
|
||||||
| with: | ||||||
| name: benchmark_results.txt | ||||||
| path: ./benchmark_results.txt | ||||||
| - name: Upload Pull Request Event | ||||||
| uses: actions/upload-artifact@v4 | ||||||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
Suggested change
|
||||||
| with: | ||||||
| name: event.json | ||||||
| path: ${{ github.event_path }} | ||||||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,57 @@ | ||
| name: Upload PR Benchmark Results | ||
|
|
||
| on: | ||
| workflow_run: | ||
| workflows: [Run Benchmarks] | ||
| types: [completed] | ||
|
|
||
| jobs: | ||
| upload_benchmarks: | ||
| if: github.event.workflow_run.conclusion == 'success' | ||
| permissions: | ||
| pull-requests: write | ||
| runs-on: ubuntu-latest | ||
| env: | ||
| BENCHMARK_RESULTS: benchmark_results.txt | ||
| PR_EVENT: event.json | ||
| steps: | ||
| - name: Download Benchmark Results | ||
| uses: dawidd6/action-download-artifact@v6 | ||
| with: | ||
| name: ${{ env.BENCHMARK_RESULTS }} | ||
| run_id: ${{ github.event.workflow_run.id }} | ||
| - name: Download PR Event | ||
| uses: dawidd6/action-download-artifact@v6 | ||
| with: | ||
| name: ${{ env.PR_EVENT }} | ||
| run_id: ${{ github.event.workflow_run.id }} | ||
| - name: Export PR Event Data | ||
| uses: actions/github-script@v6 | ||
| with: | ||
| script: | | ||
| let fs = require('fs'); | ||
| let prEvent = JSON.parse(fs.readFileSync(process.env.PR_EVENT, {encoding: 'utf8'})); | ||
| core.exportVariable("PR_HEAD", prEvent.pull_request.head.ref); | ||
| core.exportVariable("PR_HEAD_SHA", prEvent.pull_request.head.sha); | ||
| core.exportVariable("PR_BASE", prEvent.pull_request.base.ref); | ||
| core.exportVariable("PR_BASE_SHA", prEvent.pull_request.base.sha); | ||
| core.exportVariable("PR_NUMBER", prEvent.number); | ||
| - uses: bencherdev/bencher@main | ||
| - name: Track Benchmarks with Bencher | ||
| run: | | ||
| bencher run \ | ||
| --host 'https://bencher.php.rs/api' \ | ||
| --project ext-php-rs \ | ||
| --token '${{ secrets.BENCHER_API_TOKEN }}' \ | ||
| --branch "$PR_HEAD" \ | ||
| --hash "$PR_HEAD_SHA" \ | ||
| --start-point "$PR_BASE" \ | ||
| --start-point-hash "$PR_BASE_SHA" \ | ||
| --start-point-clone-thresholds \ | ||
| --start-point-reset \ | ||
| --testbed "$(php --version | head -n 1)" \ | ||
| --err \ | ||
| --adapter rust_gungraun \ | ||
| --github-actions '${{ secrets.GITHUB_TOKEN }}' \ | ||
| --ci-number "$PR_NUMBER" \ | ||
| --file "$BENCHMARK_RESULTS" |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,14 @@ | ||
| [target.'cfg(not(target_os = "windows"))'] | ||
| rustflags = ["-C", "link-arg=-Wl,-undefined,dynamic_lookup"] | ||
|
|
||
| [target.x86_64-pc-windows-msvc] | ||
| linker = "rust-lld" | ||
|
|
||
| [target.i686-pc-windows-msvc] | ||
| linker = "rust-lld" | ||
|
|
||
| [target.'cfg(target_env = "musl")'] | ||
| rustflags = ["-C", "target-feature=-crt-static"] | ||
|
|
||
| [profile.bench] | ||
| debug = true |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1 @@ | ||
| /target |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,25 @@ | ||
| [package] | ||
| name = "benches" | ||
| version = "0.1.0" | ||
| edition = "2024" | ||
| publish = false | ||
|
|
||
| [dependencies] | ||
| ext-php-rs = { path = "../" } | ||
| gungraun = { version = "0.17.0", features = ["client_requests"] } | ||
|
|
||
| [features] | ||
| default = ["enum", "runtime", "closure"] | ||
| enum = ["ext-php-rs/enum"] | ||
| anyhow = ["ext-php-rs/anyhow"] | ||
| runtime = ["ext-php-rs/runtime"] | ||
| closure = ["ext-php-rs/closure"] | ||
| static = ["ext-php-rs/static"] | ||
|
|
||
| [lib] | ||
| crate-type = ["cdylib"] | ||
| bench = false | ||
|
|
||
| [[bench]] | ||
| name = "binary_bench" | ||
| harness = false |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,102 @@ | ||
| use std::{ | ||
| process::Command, | ||
| sync::{LazyLock, Once}, | ||
| }; | ||
|
|
||
| use gungraun::{ | ||
| binary_benchmark, binary_benchmark_group, main, BinaryBenchmarkConfig, Callgrind, | ||
| FlamegraphConfig, | ||
| }; | ||
|
|
||
| static BUILD: Once = Once::new(); | ||
| static EXT_TARGET_DIR: LazyLock<String> = LazyLock::new(|| { | ||
| let mut dir = std::env::current_dir().expect("Could not get cwd"); | ||
| dir.push("target"); | ||
| dir.push("release"); | ||
| dir.display().to_string() | ||
| }); | ||
|
|
||
| fn setup() { | ||
| BUILD.call_once(|| { | ||
| let mut command = Command::new("cargo"); | ||
| command.arg("build"); | ||
|
|
||
| command.arg("--release"); | ||
|
|
||
| // Build features list dynamically based on compiled features | ||
| // Note: Using vec_init_then_push pattern here is intentional due to conditional compilation | ||
| #[allow(clippy::vec_init_then_push)] | ||
| { | ||
| let mut features = vec![]; | ||
| #[cfg(feature = "enum")] | ||
| features.push("enum"); | ||
| #[cfg(feature = "closure")] | ||
| features.push("closure"); | ||
| #[cfg(feature = "anyhow")] | ||
| features.push("anyhow"); | ||
| #[cfg(feature = "runtime")] | ||
| features.push("runtime"); | ||
| #[cfg(feature = "static")] | ||
| features.push("static"); | ||
|
|
||
| if !features.is_empty() { | ||
| command.arg("--no-default-features"); | ||
| command.arg("--features").arg(features.join(",")); | ||
| } | ||
| } | ||
|
|
||
| let result = command.output().expect("failed to execute cargo build"); | ||
|
|
||
| assert!( | ||
| result.status.success(), | ||
| "Extension build failed:\nstdout: {}\nstderr: {}", | ||
| String::from_utf8_lossy(&result.stdout), | ||
| String::from_utf8_lossy(&result.stderr) | ||
| ); | ||
| }); | ||
| } | ||
|
|
||
| #[binary_benchmark] | ||
| #[bench::single_function_call(args = ("benches/function_call.php", 1))] | ||
| #[bench::multiple_function_calls(args = ("benches/function_call.php", 10))] | ||
| #[bench::lots_of_function_calls(args = ("benches/function_call.php", 100_000))] | ||
| fn function_calls(path: &str, cnt: usize) -> gungraun::Command { | ||
| setup(); | ||
|
|
||
| gungraun::Command::new("php") | ||
| .arg(format!("-dextension={}/libbenches.so", *EXT_TARGET_DIR)) | ||
| .arg(path) | ||
| .arg(cnt.to_string()) | ||
| .build() | ||
| } | ||
|
|
||
| #[binary_benchmark] | ||
| #[bench::single_callback_call(args = ("benches/callback_call.php", 1))] | ||
| #[bench::multiple_callback_calls(args = ("benches/callback_call.php", 10))] | ||
| #[bench::lots_of_callback_calls(args = ("benches/callback_call.php", 100_000))] | ||
| fn callback_calls(path: &str, cnt: usize) -> gungraun::Command { | ||
| setup(); | ||
|
|
||
| gungraun::Command::new("php") | ||
| .arg(format!("-dextension={}/libbenches.so", *EXT_TARGET_DIR)) | ||
| .arg(path) | ||
| .arg(cnt.to_string()) | ||
| .build() | ||
| } | ||
|
|
||
| binary_benchmark_group!( | ||
| name = function; | ||
| benchmarks = function_calls | ||
| ); | ||
|
|
||
| binary_benchmark_group!( | ||
| name = callback; | ||
| benchmarks = callback_calls | ||
| ); | ||
|
|
||
| main!( | ||
| config = BinaryBenchmarkConfig::default() | ||
| .tool(Callgrind::with_args(["--instr-atstart=no", "--I1=32768,8,64", "--D1=32768,8,64", "--LL=67108864,16,64"]) | ||
| .flamegraph(FlamegraphConfig::default())); | ||
| binary_benchmark_groups = function, callback | ||
| ); |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,5 @@ | ||
| <?php | ||
|
|
||
| declare(strict_types=1); | ||
|
|
||
| bench_callback_function(fn ($i) => $i * 2, (int) $argv[1]); |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,11 @@ | ||
| <?php | ||
|
|
||
| declare(strict_types=1); | ||
|
|
||
| start_instrumentation(); | ||
|
|
||
| foreach (range(1, $argv[1]) as $i) { | ||
| bench_function($i); | ||
| } | ||
|
|
||
| stop_instrumentation(); |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,47 @@ | ||
| #![cfg_attr(windows, feature(abi_vectorcall))] | ||
| #![allow( | ||
| clippy::must_use_candidate, | ||
| clippy::missing_panics_doc, | ||
| clippy::needless_pass_by_value, | ||
| clippy::implicit_hasher | ||
| )] | ||
|
|
||
| use ext_php_rs::{prelude::*, types::Zval}; | ||
|
|
||
| #[php_function] | ||
| pub fn bench_function(n: u64) -> u64 { | ||
| // A simple function that does not do much work | ||
| n | ||
| } | ||
|
|
||
| #[php_function] | ||
| pub fn bench_callback_function(callback: ZendCallable, n: usize) { | ||
| // Call the provided PHP callable with a fixed argument | ||
| start_instrumentation(); | ||
| for i in 0..n { | ||
| callback | ||
| .try_call(vec![&i]) | ||
| .expect("Failed to call function"); | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Is there a more graceful way to report an error here instead of panicking for the benchmark? |
||
| } | ||
| stop_instrumentation(); | ||
| } | ||
|
|
||
| #[php_function] | ||
| pub fn start_instrumentation() { | ||
| gungraun::client_requests::callgrind::start_instrumentation(); | ||
| // gungraun::client_requests::callgrind::toggle_collect(); | ||
| } | ||
|
|
||
| #[php_function] | ||
| pub fn stop_instrumentation() { | ||
| gungraun::client_requests::callgrind::stop_instrumentation(); | ||
| } | ||
|
|
||
| #[php_module] | ||
| pub fn build_module(module: ModuleBuilder) -> ModuleBuilder { | ||
| module | ||
| .function(wrap_function!(bench_function)) | ||
| .function(wrap_function!(bench_callback_function)) | ||
| .function(wrap_function!(start_instrumentation)) | ||
| .function(wrap_function!(stop_instrumentation)) | ||
| } | ||
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.