Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion Makefile.am
Original file line number Diff line number Diff line change
Expand Up @@ -252,6 +252,7 @@ dist_noinst_SCRIPTS = autogen.sh
EXTRA_DIST = $(DIST_SHARE) $(DIST_CONTRIB) $(WINDOWS_PACKAGING) $(OSX_PACKAGING) $(BIN_CHECKS)

EXTRA_DIST += \
test/bench \
test/functional \
test/fuzz

Expand Down Expand Up @@ -319,5 +320,5 @@ clean-docs:

clean-local: clean-docs
rm -rf coverage_percent.txt test_dash.coverage/ total.coverage/ fuzz.coverage/ test/tmp/ cache/ $(OSX_APP)
rm -rf test/functional/__pycache__ test/functional/test_framework/__pycache__ test/cache share/rpcauth/__pycache__
rm -rf test/bench/__pycache__ test/functional/__pycache__ test/functional/test_framework/__pycache__ test/cache share/rpcauth/__pycache__
rm -rf dist/
4 changes: 4 additions & 0 deletions ci/lint/04_install.sh
Original file line number Diff line number Diff line change
Expand Up @@ -34,11 +34,15 @@ if [ -z "${SKIP_PYTHON_INSTALL}" ]; then
fi

# NOTE: BUMP ALSO contrib/containers/ci/ci-slim.Dockerfile
${CI_RETRY_EXE} pip3 install aiohttp==3.13.3
${CI_RETRY_EXE} pip3 install codespell==2.2.1
${CI_RETRY_EXE} pip3 install flake8==5.0.4
${CI_RETRY_EXE} pip3 install jinja2==3.1.6
${CI_RETRY_EXE} pip3 install lief==0.13.2
${CI_RETRY_EXE} pip3 install multiprocess==0.70.19
${CI_RETRY_EXE} pip3 install mypy==0.981
${CI_RETRY_EXE} pip3 install pyzmq==24.0.1
${CI_RETRY_EXE} pip3 install tabulate==0.10.0
${CI_RETRY_EXE} pip3 install vulture==2.6

SHELLCHECK_VERSION=v0.8.0
Expand Down
1 change: 1 addition & 0 deletions configure.ac
Original file line number Diff line number Diff line change
Expand Up @@ -2062,6 +2062,7 @@ AC_CONFIG_LINKS([src/.bear-tidy-config:src/.bear-tidy-config])
AC_CONFIG_LINKS([src/.clang-tidy:src/.clang-tidy])
AC_CONFIG_LINKS([src/ipc/.clang-tidy:src/ipc/.clang-tidy])
AC_CONFIG_LINKS([src/test/.clang-tidy:src/test/.clang-tidy])
AC_CONFIG_LINKS([test/bench/bench_runner.py:test/bench/bench_runner.py])
AC_CONFIG_LINKS([test/functional/test_runner.py:test/functional/test_runner.py])
AC_CONFIG_LINKS([test/fuzz/test_runner.py:test/fuzz/test_runner.py])
AC_CONFIG_LINKS([test/util/test_runner.py:test/util/test_runner.py])
Expand Down
6 changes: 4 additions & 2 deletions contrib/containers/ci/ci-slim.Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -77,13 +77,15 @@ ENV UV_SYSTEM_PYTHON=1
# Install Python packages
# NOTE: if versions are changed, update ci/lint/04_install.sh
RUN uv pip install --system --break-system-packages \
aiohttp==3.13.3 \
codespell==2.2.1 \
flake8==5.0.4 \
jinja2 \
jinja2==3.1.6 \
lief==0.13.2 \
multiprocess \
multiprocess==0.70.19 \
mypy==0.981 \
pyzmq==24.0.1 \
tabulate==0.10.0 \
vulture==2.6

# Install packages relied on by tests
Expand Down
1 change: 1 addition & 0 deletions src/.bear-tidy-config
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
{
"schema": "4.0",
"output": {
"content": {
"include_only_existing_source": true,
Expand Down
133 changes: 133 additions & 0 deletions test/bench/bench_framework.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
#!/usr/bin/env python3
# Copyright (c) 2026 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

import os
import sys
import time
from typing import Dict, List, Optional

# Allow imports from the functional test framework.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'functional'))

from bench_results import ( # noqa: E402
BenchResult,
results_to_markdown,
save_results,
)
from test_framework.test_framework import BitcoinTestFramework # noqa: E402


class BenchFramework(BitcoinTestFramework):
    """Base class for benchmarks driven by the functional test framework.

    Subclasses implement ``set_bench_params`` (configuration: ``num_nodes``,
    iteration counts, ...) and ``run_bench`` (the measured workload). This
    class drives warmup, timed iterations, latency-sample collection and
    result reporting.
    """

    def set_test_params(self) -> None:
        """Initialise benchmark state, then delegate to ``set_bench_params``."""
        self.warmup_iterations: int = 0
        self.bench_iterations: int = 1
        self.bench_name: str = type(self).__name__
        self.results_file: Optional[str] = None
        # Raw latency samples (milliseconds), keyed by measurement name.
        self._samples: Dict[str, List[float]] = {}
        # perf_counter() value captured by start_timer(); None while idle.
        self._timer_start: Optional[float] = None
        self.set_bench_params()

    def setup_nodes(self) -> None:
        """Merge --daemon-args into extra_args just before nodes start."""
        daemon_args = (getattr(self.options, "daemon_args", None) or "").split()
        if daemon_args and self.extra_args is not None:
            for per_node_args in self.extra_args:
                per_node_args.extend(daemon_args)
        super().setup_nodes()

    def run_test(self) -> None:
        """Execute warmup, timed iterations, then report."""
        self.results_file = getattr(self.options, "results_file", None)

        warmups = self.warmup_iterations
        if warmups > 0:
            self.log.info(
                "Warming up (%d iteration%s)...",
                warmups,
                "" if warmups == 1 else "s",
            )
            for n in range(1, warmups + 1):
                self.log.debug(" warmup %d/%d", n, warmups)
                self.run_bench()
            # Discard anything recorded during warmup so it cannot
            # pollute the reported numbers.
            self._samples.clear()

        runs = self.bench_iterations
        self.log.info(
            "Running benchmark (%d iteration%s)...",
            runs,
            "" if runs == 1 else "s",
        )
        for n in range(1, runs + 1):
            self.log.debug(" iteration %d/%d", n, runs)
            self.run_bench()

        self._report_results()

    def add_options(self, parser) -> None:  # type: ignore[override]
        """Adds bench-specific args. Subclasses should call super first."""
        parser.add_argument(
            "--daemon-args",
            dest="daemon_args",
            default=None,
            help="Extra daemon arguments as a single string "
            "(e.g. --daemon-args=\"-rpcworkqueue=1024 -rpcthreads=8\")",
        )
        parser.add_argument(
            "--results-file",
            dest="results_file",
            default=None,
            help="Save results to a JSON file",
        )

    def set_bench_params(self) -> None:
        """Benchmarks must override this to set ``num_nodes``, etc."""
        raise NotImplementedError

    def run_bench(self) -> None:
        """Benchmarks must override this to define the workload."""
        raise NotImplementedError

    def start_timer(self) -> None:
        """Mark the beginning of a timed section."""
        if self._timer_start is not None:
            self.log.warning("start_timer() called twice without stop_timer()")
        self._timer_start = time.perf_counter()

    def stop_timer(self, name: str) -> float:
        """Record elapsed time (ms) since the last ``start_timer()`` call, returns in ms."""
        started = self._timer_start
        if started is None:
            raise RuntimeError("stop_timer() called without start_timer()")
        elapsed_ms = (time.perf_counter() - started) * 1000.0
        self._timer_start = None
        self.record_sample(name, elapsed_ms)
        return elapsed_ms

    def record_sample(self, name: str, value_ms: float) -> None:
        """Directly record a latency sample (ms) without using the timer."""
        self._samples.setdefault(name, []).append(value_ms)

    def _build_results(self) -> List[BenchResult]:
        """Convert raw samples into a list of ``BenchResult`` objects."""
        results: List[BenchResult] = []
        for measurement, values in self._samples.items():
            # Skip measurements that never received a sample.
            if values:
                results.append(BenchResult.from_samples(measurement, values))
        return results

    def _report_results(self) -> None:
        """Print a summary of all recorded measurements."""
        results = self._build_results()
        print(results_to_markdown(results, title=self.bench_name))

        if self.results_file:
            save_results(results, self.results_file, label=self.bench_name)
            self.log.info("Results saved to %s", self.results_file)

    @property
    def samples(self) -> Dict[str, List[float]]:
        """Access the raw sample data."""
        return self._samples
Loading
Loading