|
//! Criterion benchmarks for the `get_header` PBS flow.
//!
//! # What this measures
//!
//! The full `get_header` pipeline end-to-end: HTTP fan-out to N in-process mock
//! relays, response parsing, header validation, signature verification, and bid
//! selection. This is wall-clock timing — useful for local development feedback
//! and catching latency regressions across relay counts.
//!
//! Criterion runs each benchmark hundreds of times, applies statistical analysis,
//! and reports mean ± standard deviation. Results are saved to
//! `target/criterion/` as HTML reports (open `report/index.html`).
//!
//! # Running
//!
//! ```bash
//! # Run all benchmarks
//! cargo bench --package cb-bench-micro
//!
//! # Run a specific variant by filter
//! cargo bench --package cb-bench-micro -- 3_relays
//!
//! # Save a named baseline to compare against later
//! cargo bench --package cb-bench-micro -- --save-baseline main
//!
//! # Compare against a saved baseline
//! cargo bench --package cb-bench-micro -- --load-baseline main --save-baseline current
//! ```
//!
//! # What is NOT measured
//!
//! - PBS HTTP server overhead (we call `get_header()` directly, bypassing axum routing)
//! - Mock relay startup time (servers are started once in setup, before timing begins)
//! - `HeaderMap` allocation (created once in setup, cloned cheaply per iteration)

use std::{path::PathBuf, sync::Arc, time::Duration};

use alloy::primitives::B256;
use axum::http::HeaderMap;
use cb_common::{pbs::GetHeaderParams, signer::random_secret, types::Chain};
use cb_pbs::{PbsState, get_header};
use cb_tests::{
    mock_relay::{MockRelayState, start_mock_relay_service},
    utils::{generate_mock_relay, get_pbs_static_config, to_pbs_config},
};
use criterion::{Criterion, black_box, criterion_group, criterion_main};

// Ports 19201–19205 are reserved for the microbenchmark mock relays: relay i
// binds BASE_PORT + 1 + i for i in 0..MAX_RELAYS (see bench_get_header).
const BASE_PORT: u16 = 19200;
// Chain the mock relays sign for; the same value is used to build each PbsState.
const CHAIN: Chain = Chain::Hoodi;
// Total number of mock relay servers started once during setup.
const MAX_RELAYS: usize = 5;
// Relay-count variants benchmarked; each variant gets a PbsState over the first
// n relay clients.
const RELAY_COUNTS: [usize; 3] = [1, 3, MAX_RELAYS];
| 53 | + |
| 54 | +/// Benchmarks `get_header` across three relay-count variants. |
| 55 | +/// |
| 56 | +/// # Setup (runs once, not measured) |
| 57 | +/// |
| 58 | +/// All MAX_RELAYS mock relays are started up-front and shared across variants. |
| 59 | +/// Each variant gets its own `PbsState` pointing to a different relay subset. |
| 60 | +/// The mock relays are in-process axum servers on localhost. |
| 61 | +/// |
| 62 | +/// # Per-iteration (measured) |
| 63 | +/// |
| 64 | +/// Each call to `b.iter(|| ...)` runs `get_header()` once: |
| 65 | +/// - Fans out HTTP requests to N mock relays concurrently |
| 66 | +/// - Parses and validates each relay response (header data + BLS signature) |
| 67 | +/// - Selects the highest-value bid |
| 68 | +/// |
| 69 | +/// `black_box(...)` prevents the compiler from optimizing away inputs or the |
| 70 | +/// return value. Without it, the optimizer could see that the result is unused |
| 71 | +/// and eliminate the call entirely, producing a meaningless zero measurement. |
| 72 | +fn bench_get_header(c: &mut Criterion) { |
| 73 | + let rt = tokio::runtime::Runtime::new().expect("tokio runtime"); |
| 74 | + |
| 75 | + // Start all mock relays once and build one PbsState per relay-count variant. |
| 76 | + // All relays share the same MockRelayState (and therefore the same signing key). |
| 77 | + let (states, params) = rt.block_on(async { |
| 78 | + let signer = random_secret(); |
| 79 | + let pubkey = signer.public_key(); |
| 80 | + let mock_state = Arc::new(MockRelayState::new(CHAIN, signer)); |
| 81 | + |
| 82 | + let relay_clients: Vec<_> = (0..MAX_RELAYS) |
| 83 | + .map(|i| { |
| 84 | + let port = BASE_PORT + 1 + i as u16; |
| 85 | + tokio::spawn(start_mock_relay_service(mock_state.clone(), port)); |
| 86 | + generate_mock_relay(port, pubkey.clone()).expect("relay client") |
| 87 | + }) |
| 88 | + .collect(); |
| 89 | + |
| 90 | + // Give all servers time to bind before benchmarking starts. |
| 91 | + tokio::time::sleep(Duration::from_millis(200)).await; |
| 92 | + |
| 93 | + let params = GetHeaderParams { slot: 0, parent_hash: B256::ZERO, pubkey }; |
| 94 | + |
| 95 | + // Port 0 here is the port the PBS service itself would bind to for incoming |
| 96 | + // validator requests. We call get_header() as a function directly, so no |
| 97 | + // PBS server is started and this port is never used. The actual relay |
| 98 | + // endpoints are carried inside the RelayClient objects (ports 19201–19205). |
| 99 | + let states: Vec<PbsState> = RELAY_COUNTS |
| 100 | + .iter() |
| 101 | + .map(|&n| { |
| 102 | + let config = |
| 103 | + to_pbs_config(CHAIN, get_pbs_static_config(0), relay_clients[..n].to_vec()); |
| 104 | + PbsState::new(config, PathBuf::new()) |
| 105 | + }) |
| 106 | + .collect(); |
| 107 | + |
| 108 | + (states, params) |
| 109 | + }); |
| 110 | + |
| 111 | + // Empty HeaderMap matches what the PBS route handler receives for requests without |
| 112 | + // custom headers. Created once here to avoid measuring its allocation per iteration. |
| 113 | + let headers = HeaderMap::new(); |
| 114 | + |
| 115 | + // A BenchmarkGroup groups related functions so Criterion produces a single |
| 116 | + // comparison table and chart. All variants share the name "get_header/". |
| 117 | + let mut group = c.benchmark_group("get_header"); |
| 118 | + |
| 119 | + for (i, relay_count) in RELAY_COUNTS.iter().enumerate() { |
| 120 | + let state = states[i].clone(); |
| 121 | + let params = params.clone(); |
| 122 | + let headers = headers.clone(); |
| 123 | + |
| 124 | + // bench_function registers one timing function. The closure receives a |
| 125 | + // `Bencher` — calling `b.iter(|| ...)` is the measured hot loop. |
| 126 | + // Everything outside `b.iter` is setup and not timed. |
| 127 | + group.bench_function(format!("{relay_count}_relays"), |b| { |
| 128 | + b.iter(|| { |
| 129 | + // block_on drives the async future to completion on the shared |
| 130 | + // runtime. get_header takes owned args, so we clone cheap types |
| 131 | + // (Arc-backed state, stack-sized params) on each iteration. |
| 132 | + rt.block_on(get_header( |
| 133 | + black_box(params.clone()), |
| 134 | + black_box(headers.clone()), |
| 135 | + black_box(state.clone()), |
| 136 | + )) |
| 137 | + .expect("get_header failed") |
| 138 | + }) |
| 139 | + }); |
| 140 | + } |
| 141 | + |
| 142 | + group.finish(); |
| 143 | +} |
// criterion_group! registers bench_get_header in a benchmark group named
// "benches"; criterion_main! expands to the main() entry point that runs it.
criterion_group!(benches, bench_get_header);
criterion_main!(benches);