Skip to content

Improve benchmarking for better view into perf improvements and regressions #52

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 4 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 29 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,35 @@ jobs:
run: cargo test --all-features
- name: Run integration tests
run: cargo test -- --test-threads=1 --ignored
bench:
  runs-on: ubuntu-latest
  env:
    IAI_CALLGRIND_ALLOW_ASLR: "true"
    # Save (and on subsequent runs compare against, then overwrite) a named baseline.
    IAI_CALLGRIND_SAVE_BASELINE: baseline
    IAI_CALLGRIND_REGRESSION: Ir=0.5, EstimatedCycles=10, SysCount=0
  services:
    memcached:
      image: memcached:latest
      ports:
        - 11211:11211
  steps:
    - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
    - name: Install libclang-dev and valgrind for benchmarking
      # `-y` is required: a plain `apt install` prompts for confirmation and can
      # hang or fail on a non-interactive CI runner. `apt-get update` first so
      # the package lists are fresh.
      run: sudo apt-get update && sudo apt-get install -y libclang-dev valgrind
    - name: Install iai-callgrind-runner
      # Runner version must match the iai-callgrind crate version in Cargo.toml.
      run: cargo install --version 0.13.4 iai-callgrind-runner
    - name: Checkout main branch
      uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
      with:
        ref: "main"
    - name: Run callgrind benchmarks against main for baseline
      run: cargo bench callgrind_bench
    - name: Checkout feature branch
      uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
      with:
        ref: ${{ github.event.pull_request.head.ref }}
    - name: Benchmark feature branch against baseline
      run: cargo bench callgrind_bench
build:
runs-on: ubuntu-latest
steps:
Expand Down
6 changes: 6 additions & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@ tokio = { version = "1.26", features = ["full"] }
rand = "0.8"
criterion = { version = "0.5.1", features = ["async_tokio"] }
serial_test = "3.1.1"
iai-callgrind = "0.13.4"
once_cell = "1.19.0"

[features]
default = []
Expand All @@ -43,3 +45,7 @@ path = "examples/tcp.rs"
[[bench]]
name = "bench"
harness = false

# Instruction-count benchmarks driven by iai-callgrind. `harness = false` is
# required because iai-callgrind supplies its own entry point via its `main!`
# macro instead of the default libtest bench harness.
[[bench]]
name = "callgrind_bench"
harness = false
205 changes: 205 additions & 0 deletions benches/callgrind_bench.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,205 @@
use async_memcached::Client;

use std::{hint::black_box, sync::Mutex};

use iai_callgrind::{library_benchmark, library_benchmark_group, main};
use once_cell::sync::Lazy;
use tokio::runtime::Runtime;

// Key and payload sizes exercised by the benchmarks below, chosen to bracket
// the practical range memcached accepts.
const SMALL_KEY_SIZE: usize = 10;
const LARGE_KEY_SIZE: usize = 250; // Memcached's ~default maximum key size
const SMALL_PAYLOAD_SIZE: usize = 128;
const LARGE_PAYLOAD_SIZE: usize = 1000 * 1024; // Memcached's ~default maximum payload size

// Fixed key set used by the `*_multi` read/delete benchmarks.
const MULTI_KEY_VEC: &[&str] = &["key1", "key2", "key3"];

// Fixed key/value pairs used by the `*_multi` write benchmarks.
const MULTI_KV_VEC: &[(&str, &str)] = &[("key1", "value1"), ("key2", "value2"), ("key3", "value3")];

static CLIENT: Lazy<Mutex<Client>> = Lazy::new(|| {
let rt = Runtime::new().unwrap();
let client = rt.block_on(async {
Client::new("tcp://127.0.0.1:11211")
.await
.expect("failed to create client")
});

Mutex::new(client)
});

// 'get' method benchmarks
/// Measures `get` for a key at the small end of the size range.
#[library_benchmark]
async fn bench_get_small() {
    let key = "a".repeat(SMALL_KEY_SIZE);
    let mut client = CLIENT.lock().unwrap();

    let _ = black_box(client.get(&key).await.unwrap());
}

/// Measures `get` for a key at memcached's ~maximum key size.
#[library_benchmark]
async fn bench_get_large() {
    let key = "a".repeat(LARGE_KEY_SIZE);
    let mut client = CLIENT.lock().unwrap();

    let _ = black_box(client.get(&key).await.unwrap());
}

/// Measures `get_multi` over a small, fixed set of keys.
#[library_benchmark]
async fn bench_get_multi_small() {
    let mut client = CLIENT.lock().unwrap();
    let values = client.get_multi(MULTI_KEY_VEC).await.unwrap();

    let _ = black_box(values);
}

// 'set' method benchmarks
/// Measures `set` with a small key and a small payload.
#[library_benchmark]
async fn bench_set_small() {
    let key = "a".repeat(SMALL_KEY_SIZE);
    let payload = "b".repeat(SMALL_PAYLOAD_SIZE);
    let mut client = CLIENT.lock().unwrap();

    let _ = black_box(client.set(&key, &payload, None, None).await.unwrap());
}

/// Measures `set` with a small key and a payload near memcached's
/// ~default maximum item size.
#[library_benchmark]
async fn bench_set_large() {
    let key = "a".repeat(SMALL_KEY_SIZE);
    let payload = "b".repeat(LARGE_PAYLOAD_SIZE);
    let mut client = CLIENT.lock().unwrap();

    let _ = black_box(client.set(&key, &payload, None, None).await.unwrap());
}

/// Measures `set_multi` over a small, fixed set of key/value pairs.
#[library_benchmark]
async fn bench_set_multi_small() {
    let mut client = CLIENT.lock().unwrap();
    let result = client.set_multi(MULTI_KV_VEC, None, None).await.unwrap();

    let _ = black_box(result);
}

// 'add' method benchmarks
/// Measures `add` with a small key and a small payload.
#[library_benchmark]
async fn bench_add_small() {
    let key = "a".repeat(SMALL_KEY_SIZE);
    let payload = "b".repeat(SMALL_PAYLOAD_SIZE);
    let mut client = CLIENT.lock().unwrap();

    let _ = black_box(client.add(&key, &payload, None, None).await.unwrap());
}

/// Measures `add` with a small key and a payload near memcached's
/// ~default maximum item size.
#[library_benchmark]
async fn bench_add_large() {
    let key = "a".repeat(SMALL_KEY_SIZE);
    let payload = "b".repeat(LARGE_PAYLOAD_SIZE);
    let mut client = CLIENT.lock().unwrap();

    let _ = black_box(client.add(&key, &payload, None, None).await.unwrap());
}

/// Measures `add_multi` over a small, fixed set of key/value pairs.
#[library_benchmark]
async fn bench_add_multi_small() {
    let mut client = CLIENT.lock().unwrap();
    let result = client.add_multi(MULTI_KV_VEC, None, None).await.unwrap();

    let _ = black_box(result);
}

// 'delete' method benchmarks
/// Measures `delete` for a small key.
///
/// NOTE(review): assumes `delete` of a missing key returns `Ok` — confirm
/// against async-memcached's NOT_FOUND handling, since the key is not
/// re-seeded between iterations.
#[library_benchmark]
async fn bench_delete() {
    let key = "a".repeat(SMALL_KEY_SIZE);
    let mut client = CLIENT.lock().unwrap();

    let _ = black_box(client.delete(&key).await.unwrap());
}

/// Measures fire-and-forget `delete_multi_no_reply` over a small key set.
#[library_benchmark]
async fn bench_delete_multi_no_reply_small() {
    let mut client = CLIENT.lock().unwrap();
    let result = client.delete_multi_no_reply(MULTI_KEY_VEC).await.unwrap();

    let _ = black_box(result);
}

// 'increment' method benchmarks
/// Measures `increment` on a counter key seeded to zero.
///
/// NOTE(review): the seeding `set` runs inside the benchmark body, so its
/// cost is included in the measured instruction counts.
#[library_benchmark]
async fn bench_increment() {
    let key = "a".repeat(SMALL_KEY_SIZE);
    let mut client = CLIENT.lock().unwrap();
    client.set(&key, 0_u64, None, None).await.unwrap();

    let _ = black_box(client.increment(&key, 1).await.unwrap());
}

/// Measures fire-and-forget `increment_no_reply` on a counter key seeded to
/// zero.
///
/// NOTE(review): the seeding `set` runs inside the benchmark body, so its
/// cost is included in the measured instruction counts.
#[library_benchmark]
async fn bench_increment_no_reply() {
    let key = "a".repeat(SMALL_KEY_SIZE);
    let mut client = CLIENT.lock().unwrap();
    client.set(&key, 0_u64, None, None).await.unwrap();

    let _ = black_box(client.increment_no_reply(&key, 1).await.unwrap());
}

// 'decrement' method benchmarks
/// Measures `decrement` on a counter key seeded high enough that it cannot
/// underflow during the run.
///
/// NOTE(review): the seeding `set` runs inside the benchmark body, so its
/// cost is included in the measured instruction counts.
#[library_benchmark]
async fn bench_decrement() {
    let key = "a".repeat(SMALL_KEY_SIZE);
    let mut client = CLIENT.lock().unwrap();
    client.set(&key, 1000000_u64, None, None).await.unwrap();

    let _ = black_box(client.decrement(&key, 1).await.unwrap());
}

/// Measures fire-and-forget `decrement_no_reply` on a counter key seeded high
/// enough that it cannot underflow during the run.
///
/// NOTE(review): the seeding `set` runs inside the benchmark body, so its
/// cost is included in the measured instruction counts.
#[library_benchmark]
async fn bench_decrement_no_reply() {
    let key = "a".repeat(SMALL_KEY_SIZE);
    let mut client = CLIENT.lock().unwrap();
    client.set(&key, 1000000_u64, None, None).await.unwrap();

    let _ = black_box(client.decrement_no_reply(&key, 1).await.unwrap());
}

// Registers every benchmark above in a single iai-callgrind group; the group
// is wired into the harness entry point by the `main!` macro at the bottom of
// this file.
library_benchmark_group!(
    name = bench_cache_group;
    benchmarks =
    bench_get_small,
    bench_get_large,
    bench_get_multi_small,
    bench_set_small,
    bench_set_large,
    bench_set_multi_small,
    bench_add_small,
    bench_add_large,
    bench_add_multi_small,
    bench_delete,
    bench_delete_multi_no_reply_small,
    bench_increment,
    bench_increment_no_reply,
    bench_decrement,
    bench_decrement_no_reply,
);

// Generates the benchmark binary's entry point (this bench target sets
// `harness = false`, so iai-callgrind supplies `main` via this macro).
main!(library_benchmark_groups = bench_cache_group);
Loading