From 07b6f9653036cd7a28f33e3777abe061edf634a0 Mon Sep 17 00:00:00 2001
From: PastaClaw
Date: Wed, 25 Feb 2026 12:50:45 -0600
Subject: [PATCH 1/9] ci: add fuzz regression testing and continuous fuzzing
 infrastructure

Add CI infrastructure for fuzz regression testing and continuous fuzzing:

- Fuzz regression workflow (test-fuzz.yml) called from main build
  - Replays bitcoin-core and Dash-specific corpora with -runs=0
  - Layered corpus: bitcoin-core/qa-assets + thepastaclaw/qa-assets + synthetic seeds
  - Reports failed targets with exit codes in summary
  - Sanitizer-aware (ASAN/LSAN/UBSAN with Dash-specific suppressions)
- Continuous fuzz daemon (contrib/fuzz/continuous_fuzz_daemon.sh)
  - Designed for dedicated fuzzing hosts
  - Automatic corpus management and crash collection
- Chain-based seed corpus generator (contrib/fuzz/seed_corpus_from_chain.py)
  - Extracts real transaction/block data from a running dashd
  - Generates synthetic seeds for Dash-specific targets
- Documentation (contrib/fuzz/README.md)

fix(ci): address review feedback on fuzz regression PR
---
 .github/workflows/build.yml            |  10 +
 .github/workflows/test-fuzz.yml        | 173 +++++++++
 contrib/fuzz/README.md                 | 103 ++++++
 contrib/fuzz/continuous_fuzz_daemon.sh | 292 +++++++++++++++
 contrib/fuzz/seed_corpus_from_chain.py | 477 +++++++++++++++++++++++++
 5 files changed, 1055 insertions(+)
 create mode 100644 .github/workflows/test-fuzz.yml
 create mode 100644 contrib/fuzz/README.md
 create mode 100755 contrib/fuzz/continuous_fuzz_daemon.sh
 create mode 100755 contrib/fuzz/seed_corpus_from_chain.py

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 16cf62b9ba20..9b809f20ddbd 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -290,6 +290,16 @@ jobs:
       depends-dep-opts: ${{ needs.depends-win64.outputs.dep-opts }}
       runs-on: ${{ needs.check-skip.outputs['runner-amd64'] }}
 
+  test-linux64_fuzz:
+    name: linux64_fuzz-test
+    uses: ./.github/workflows/test-fuzz.yml
+    needs: [container-slim, src-linux64_fuzz]
+    if: ${{ vars.SKIP_LINUX64_FUZZ == '' }}
+    with:
+      bundle-key: ${{ needs.src-linux64_fuzz.outputs.key }}
+      build-target: linux64_fuzz
+      container-path: ${{ needs.container-slim.outputs.path }}
+
   test-linux64:
     name: linux64-test
     uses: ./.github/workflows/test-src.yml
diff --git a/.github/workflows/test-fuzz.yml b/.github/workflows/test-fuzz.yml
new file mode 100644
index 000000000000..def543ee5c1e
--- /dev/null
+++ b/.github/workflows/test-fuzz.yml
@@ -0,0 +1,173 @@
# Reusable workflow: replays the layered fuzz corpus against every target in
# the fuzz binary built by the calling job, failing the build on any crash.
name: Fuzz regression

on:
  workflow_call:
    inputs:
      bundle-key:
        description: "Key needed to access bundle of fuzz build artifacts"
        required: true
        type: string
      build-target:
        description: "Target name as defined by inputs.sh"
        required: true
        type: string
      container-path:
        description: "Path to built container at registry"
        required: true
        type: string
      runs-on:
        description: "Runner label to use"
        required: false
        default: ubuntu-24.04
        type: string

jobs:
  fuzz-regression:
    name: Fuzz regression
    runs-on: ${{ inputs.runs-on }}
    container:
      image: ${{ inputs.container-path }}
      options: --user root
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 1

      - name: Download build artifacts
        uses: actions/download-artifact@v4
        with:
          name: ${{ inputs.bundle-key }}

      - name: Extract build artifacts
        run: |
          git config --global --add safe.directory "$PWD"
          export BUILD_TARGET="${{ inputs.build-target }}"
          export BUNDLE_KEY="${{ inputs.bundle-key }}"
          ./ci/dash/bundle-artifacts.sh extract
        shell: bash

      - name: Download corpus
        run: |
          mkdir -p /tmp/fuzz_corpus

          # Layer 1: bitcoin-core inherited corpus
          if git clone --depth=1 https://github.com/bitcoin-core/qa-assets /tmp/qa-assets; then
            if [ -d "/tmp/qa-assets/fuzz_corpora" ]; then
              cp -r /tmp/qa-assets/fuzz_corpora/. /tmp/fuzz_corpus/
              echo "Loaded bitcoin-core corpus"
            fi
          else
            echo "WARNING: Failed to clone bitcoin-core/qa-assets (non-fatal)"
          fi

          # Layer 2: Dash-specific corpus (overlays on top)
          if git clone --depth=1 https://github.com/dashpay/qa-assets /tmp/dash-qa-assets; then
            if [ -d "/tmp/dash-qa-assets/fuzz/corpora" ]; then
              cp -r /tmp/dash-qa-assets/fuzz/corpora/. /tmp/fuzz_corpus/
              echo "Loaded Dash-specific corpus"
            fi
          else
            echo "WARNING: Failed to clone dashpay/qa-assets (non-fatal)"
          fi

          # Layer 3: Generate synthetic seeds for Dash-specific targets
          if [ -f "contrib/fuzz/seed_corpus_from_chain.py" ]; then
            python3 contrib/fuzz/seed_corpus_from_chain.py --synthetic-only -o /tmp/fuzz_corpus
          fi
        shell: bash

      - name: Run fuzz regression tests
        id: fuzz-test
        run: |
          export BUILD_TARGET="${{ inputs.build-target }}"
          source ./ci/dash/matrix.sh

          BUILD_DIR="build-ci/dashcore-${BUILD_TARGET}"
          FUZZ_BIN="${BUILD_DIR}/src/test/fuzz/fuzz"

          if [ ! -x "$FUZZ_BIN" ]; then
            echo "ERROR: Fuzz binary not found at $FUZZ_BIN"
            exit 1
          fi

          # detect_leaks=0 is intentional for fuzz regression due to libFuzzer/LSan noise.
          export ASAN_OPTIONS="detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1:detect_leaks=0"
          export LSAN_OPTIONS="suppressions=${BUILD_DIR}/test/sanitizer_suppressions/lsan"
          export UBSAN_OPTIONS="suppressions=${BUILD_DIR}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1"

          # Get list of all targets (the binary prints one per line, then aborts)
          TARGETS=$(PRINT_ALL_FUZZ_TARGETS_AND_ABORT=1 "$FUZZ_BIN" 2>/tmp/fuzz_target_discovery.err || true)
          TARGET_COUNT=$(echo "$TARGETS" | grep -c '[^[:space:]]' || true)
          if [ "$TARGET_COUNT" -eq 0 ]; then
            if [ -s /tmp/fuzz_target_discovery.err ]; then
              cat /tmp/fuzz_target_discovery.err
            fi
            echo "::error::No fuzz targets found — binary may have failed to start"
            exit 1
          fi
          echo "Found $TARGET_COUNT fuzz targets"

          FAILED=0
          PASSED=0
          FAILED_TARGETS=""

          while IFS= read -r target; do
            [ -z "$target" ] && continue
            corpus_dir="/tmp/fuzz_corpus/${target}"

            if [ ! -d "$corpus_dir" ] || [ -z "$(ls -A "$corpus_dir" 2>/dev/null)" ]; then
              # No corpus for this target — run with empty input for 10s
              # This catches basic initialization crashes
              echo "::group::${target} (empty corpus, 10s run)"
              mkdir -p "$corpus_dir"
              # timeout(30) intentionally exceeds -max_total_time=10 to absorb startup/teardown jitter
              # while still terminating genuinely hung processes.
              if FUZZ="$target" timeout 30 "$FUZZ_BIN" \
                  -rss_limit_mb=4000 \
                  -max_total_time=10 \
                  -reload=0 \
                  "$corpus_dir" 2>&1; then
                echo "PASS: $target (empty corpus)"
                PASSED=$((PASSED + 1))
              else
                EXIT_CODE=$?
                echo "::error::FAIL: $target exited with code $EXIT_CODE"
                FAILED=$((FAILED + 1))
                FAILED_TARGETS="${FAILED_TARGETS} - ${target} (exit code ${EXIT_CODE})\n"
              fi
              echo "::endgroup::"
              continue
            fi

            # Run corpus regression (replay all inputs once, no mutation)
            echo "::group::${target} ($(find "$corpus_dir" -maxdepth 1 -type f | wc -l) inputs)"
            if FUZZ="$target" "$FUZZ_BIN" \
                -rss_limit_mb=4000 \
                -runs=0 \
                "$corpus_dir" 2>&1; then
              echo "PASS: $target"
              PASSED=$((PASSED + 1))
            else
              echo "::error::FAIL: $target"
              FAILED=$((FAILED + 1))
              FAILED_TARGETS="${FAILED_TARGETS} - ${target}\n"
            fi
            echo "::endgroup::"
          done <<< "$TARGETS"

          echo ""
          echo "=== Fuzz Regression Summary ==="
          echo "Passed: $PASSED"
          echo "Failed: $FAILED"
          echo "Total: $TARGET_COUNT"

          # Quote the operand: `[ $FAILED -gt 0 ]` breaks if the variable is
          # ever empty; always-quote keeps the test well-formed.
          if [ "$FAILED" -gt 0 ]; then
            echo ""
            echo "=== Failed Targets ==="
            printf '%b' "$FAILED_TARGETS"
            echo "::error::$FAILED fuzz target(s) failed regression testing"
            exit 1
          fi
        shell: bash
+
+```bash
+# Run all targets, 10 minutes each, indefinitely
+./continuous_fuzz_daemon.sh --fuzz-bin /path/to/fuzz --time-per-target 600
+
+# Run specific targets only
+./continuous_fuzz_daemon.sh --targets bls_operations,bls_ies --time-per-target 3600
+
+# Single cycle (good for cron)
+./continuous_fuzz_daemon.sh --single-cycle --time-per-target 300
+
+# Dry run — list targets
+./continuous_fuzz_daemon.sh --dry-run
+```
+
+**Output directories:**
+- `~/fuzz_corpus/<target>/` — persistent corpus per target
+- `~/fuzz_crashes/<target>/` — crash artifacts (crash-*, timeout-*, oom-*)
+- `~/fuzz_logs/` — per-target logs and daemon log
+
+### `seed_corpus_from_chain.py`
+
+Extracts real-world data from a running Dash node into fuzzer-consumable corpus
+files. Connects via `dash-cli` RPC.
+
+```bash
+# Extract from a running node
+./seed_corpus_from_chain.py -o /path/to/corpus --blocks 500
+
+# Generate only synthetic seeds (no running node required)
+./seed_corpus_from_chain.py -o /path/to/corpus --synthetic-only
+```
+
+**What it extracts:**
+- Serialized blocks and block headers
+- Special transactions (ProRegTx, ProUpServTx, CoinJoin, Asset Lock, etc.)
+- Governance objects and votes
+- Masternode list entries
+- Quorum commitment data
+
+## CI Integration
+
+The `test-fuzz.yml` workflow runs fuzz regression tests on every PR:
+
+1. Builds fuzz targets with sanitizers (ASan + UBSan + libFuzzer)
+2. Downloads seed corpus from `bitcoin-core/qa-assets` + synthetic Dash seeds
+3. Replays all corpus inputs against every fuzz target
+4. Reports failures as CI errors
+
+This catches regressions in seconds — any code change that causes a previously-
+working input to crash will be caught.
+## Building Fuzz Targets
+
+```bash
+# Configure with fuzzing + sanitizers
+./configure --enable-fuzz --with-sanitizers=fuzzer,address,undefined \
+    CC='clang -ftrivial-auto-var-init=pattern' \
+    CXX='clang++ -ftrivial-auto-var-init=pattern'
+
+# Build
+make -j$(nproc)
+
+# The fuzz binary is at src/test/fuzz/fuzz
+# Select target with FUZZ=<target>
+FUZZ=bls_operations ./src/test/fuzz/fuzz corpus_dir/
+```
+
+## Contributing Corpus Inputs
+
+Found an interesting input? Add it to the appropriate corpus directory:
+
+```bash
+# The filename should be the sha256 of the content (for dedup)
+sha256sum input_file
+cp input_file fuzz_corpus/<target>/
+```
+
+Crash-reproducing inputs are especially valuable — they become regression tests.
diff --git a/contrib/fuzz/continuous_fuzz_daemon.sh b/contrib/fuzz/continuous_fuzz_daemon.sh
new file mode 100755
index 000000000000..ad172026afac
--- /dev/null
+++ b/contrib/fuzz/continuous_fuzz_daemon.sh
@@ -0,0 +1,292 @@
+#!/usr/bin/env bash
+# Copyright (c) 2026 The Dash Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+# Continuous fuzzing daemon — cycles through all fuzz targets with
+# persistent corpus storage, crash detection, and logging.
+# +# Usage: +# ./continuous_fuzz_daemon.sh [options] +# +# Options: +# --fuzz-bin Path to the fuzz binary (default: auto-detect) +# --corpus-dir Base directory for corpus storage (default: ~/fuzz_corpus) +# --crashes-dir Directory for crash artifacts (default: ~/fuzz_crashes) +# --log-dir Directory for log files (default: ~/fuzz_logs) +# --time-per-target Seconds to fuzz each target per cycle (default: 600) +# --rss-limit RSS memory limit in MB (default: 4000) +# --targets Comma-separated list of targets to fuzz (default: all) +# --exclude Comma-separated list of targets to exclude +# --single-cycle Run one cycle and exit (for cron usage) +# --dry-run List targets and exit without fuzzing + +export LC_ALL=C +set -euo pipefail + +# --- Configuration defaults --- +FUZZ_BIN="" +TIMEOUT_BIN="" +CORPUS_DIR="${HOME}/fuzz_corpus" +CRASHES_DIR="${HOME}/fuzz_crashes" +LOG_DIR="${HOME}/fuzz_logs" +TIME_PER_TARGET=600 +RSS_LIMIT_MB=4000 +TARGET_LIST="" +EXCLUDE_LIST="" +SINGLE_CYCLE=false +DRY_RUN=false + +shuffle_lines() { + if command -v shuf >/dev/null 2>&1; then + shuf + else + awk 'BEGIN{srand()} {print rand() "\t" $0}' | sort -k1,1n | cut -f2- + fi +} + +# --- Parse arguments --- +while [[ $# -gt 0 ]]; do + case "$1" in + --fuzz-bin) [[ $# -ge 2 ]] || { echo "ERROR: --fuzz-bin requires a value" >&2; exit 1; }; FUZZ_BIN="$2"; shift 2;; + --corpus-dir) [[ $# -ge 2 ]] || { echo "ERROR: --corpus-dir requires a value" >&2; exit 1; }; CORPUS_DIR="$2"; shift 2;; + --crashes-dir) [[ $# -ge 2 ]] || { echo "ERROR: --crashes-dir requires a value" >&2; exit 1; }; CRASHES_DIR="$2"; shift 2;; + --log-dir) [[ $# -ge 2 ]] || { echo "ERROR: --log-dir requires a value" >&2; exit 1; }; LOG_DIR="$2"; shift 2;; + --time-per-target) [[ $# -ge 2 ]] || { echo "ERROR: --time-per-target requires a value" >&2; exit 1; }; TIME_PER_TARGET="$2"; shift 2;; + --rss-limit) [[ $# -ge 2 ]] || { echo "ERROR: --rss-limit requires a value" >&2; exit 1; }; RSS_LIMIT_MB="$2"; shift 2;; + --targets) [[ $# -ge 
2 ]] || { echo "ERROR: --targets requires a value" >&2; exit 1; }; TARGET_LIST="$2"; shift 2;; + --exclude) [[ $# -ge 2 ]] || { echo "ERROR: --exclude requires a value" >&2; exit 1; }; EXCLUDE_LIST="$2"; shift 2;; + --single-cycle) SINGLE_CYCLE=true; shift;; + --dry-run) DRY_RUN=true; shift;; + -h|--help) + sed -n '2,/^$/s/^# \?//p' "$0" + exit 0 + ;; + *) echo "Unknown option: $1" >&2; exit 1;; + esac +done + +# --- Validate numeric arguments --- +if ! [[ "$TIME_PER_TARGET" =~ ^[0-9]+$ ]]; then + echo "ERROR: --time-per-target must be a positive integer, got '$TIME_PER_TARGET'" >&2 + exit 1 +fi +if ! [[ "$RSS_LIMIT_MB" =~ ^[0-9]+$ ]]; then + echo "ERROR: --rss-limit must be a positive integer, got '$RSS_LIMIT_MB'" >&2 + exit 1 +fi + +# --- Auto-detect fuzz binary --- +if [[ -z "$FUZZ_BIN" ]]; then + for candidate in \ + "${HOME}/dash/src/test/fuzz/fuzz" \ + "${HOME}/dash/build_fuzz/src/test/fuzz/fuzz" \ + "$(command -v fuzz 2>/dev/null || true)"; do + if [[ -x "$candidate" ]]; then + FUZZ_BIN="$candidate" + break + fi + done + if [[ -z "$FUZZ_BIN" ]]; then + echo "ERROR: Could not find fuzz binary. Use --fuzz-bin to specify." 
>&2 + exit 1 + fi +fi + +if command -v timeout >/dev/null 2>&1; then + TIMEOUT_BIN="timeout" +elif command -v gtimeout >/dev/null 2>&1; then + TIMEOUT_BIN="gtimeout" +else + echo "WARNING: timeout command not found; external hang protection disabled" >&2 +fi + +# --- Setup directories --- +mkdir -p "$CORPUS_DIR" "$CRASHES_DIR" "$LOG_DIR" + +# --- Discover targets --- +get_all_targets() { + PRINT_ALL_FUZZ_TARGETS_AND_ABORT=1 "$FUZZ_BIN" 2>&1 || true +} + +filter_targets() { + local all_targets="$1" + local result=() + + if [[ -n "$TARGET_LIST" ]]; then + # Use only specified targets + IFS=',' read -ra wanted <<< "$TARGET_LIST" + for t in "${wanted[@]}"; do + if echo "$all_targets" | grep -qx "$t"; then + result+=("$t") + else + echo "WARNING: Target '$t' not found in fuzz binary" >&2 + fi + done + else + # Use all targets + while IFS= read -r t; do + [[ -n "$t" ]] && result+=("$t") + done <<< "$all_targets" + fi + + # Apply exclusions + if [[ -n "$EXCLUDE_LIST" ]]; then + IFS=',' read -ra excluded <<< "$EXCLUDE_LIST" + local filtered=() + for t in "${result[@]}"; do + local skip=false + for ex in "${excluded[@]}"; do + [[ "$t" == "$ex" ]] && skip=true && break + done + $skip || filtered+=("$t") + done + result=("${filtered[@]}") + fi + + printf '%s\n' "${result[@]}" +} + +# --- Logging --- +log() { + local level="$1"; shift + echo "[$(date '+%Y-%m-%d %H:%M:%S')] [$level] $*" | tee -a "${LOG_DIR}/daemon.log" +} + +trap 'log "INFO" "Caught signal — shutting down"; exit 0' SIGTERM SIGINT + +# --- Run one fuzz target --- +run_target() { + local target="$1" + local target_corpus="${CORPUS_DIR}/${target}" + local target_crashes="${CRASHES_DIR}/${target}" + local target_log="${LOG_DIR}/${target}.log" + + mkdir -p "$target_corpus" "$target_crashes" + + log "INFO" "Fuzzing target: ${target} for ${TIME_PER_TARGET}s" + + local exit_code=0 + if [[ -n "$TIMEOUT_BIN" ]]; then + FUZZ="$target" \ + 
ASAN_OPTIONS="detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1:detect_leaks=0" \ + "$TIMEOUT_BIN" $((TIME_PER_TARGET + 30)) "$FUZZ_BIN" \ + -rss_limit_mb="$RSS_LIMIT_MB" \ + -max_total_time="$TIME_PER_TARGET" \ + -reload=0 \ + -print_final_stats=1 \ + -artifact_prefix="${target_crashes}/" \ + "$target_corpus" \ + > "$target_log" 2>&1 || exit_code=$? + else + FUZZ="$target" \ + ASAN_OPTIONS="detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1:detect_leaks=0" \ + "$FUZZ_BIN" \ + -rss_limit_mb="$RSS_LIMIT_MB" \ + -max_total_time="$TIME_PER_TARGET" \ + -reload=0 \ + -print_final_stats=1 \ + -artifact_prefix="${target_crashes}/" \ + "$target_corpus" \ + > "$target_log" 2>&1 || exit_code=$? + fi + + # Check for crashes + local crash_count + crash_count=$(find "$target_crashes" -name 'crash-*' -o -name 'timeout-*' -o -name 'oom-*' 2>/dev/null | wc -l) + + if [[ "$crash_count" -gt 0 ]]; then + log "CRASH" "Target '${target}' produced ${crash_count} crash artifact(s)!" 
+ log "CRASH" "Artifacts saved to: ${target_crashes}/" + + # Extract crash details from log + grep -E "SUMMARY|ERROR|BINGO|crash-|timeout-|oom-" "$target_log" 2>/dev/null | while IFS= read -r line; do + log "CRASH" " $line" + done + fi + + # Log stats + local corpus_size + corpus_size=$(find "$target_corpus" -type f | wc -l) + local corpus_bytes + corpus_bytes=$(du -sh "$target_corpus" 2>/dev/null | cut -f1) + + if [[ $exit_code -eq 0 ]]; then + log "INFO" "Target '${target}' completed: corpus=${corpus_size} files (${corpus_bytes}), exit=${exit_code}" + else + log "WARN" "Target '${target}' exited with code ${exit_code}: corpus=${corpus_size} files (${corpus_bytes})" + fi + + return 0 # Don't fail the daemon on individual target failures +} + +# --- Main loop --- +main() { + log "INFO" "=== Continuous Fuzzing Daemon Starting ===" + log "INFO" "Fuzz binary: ${FUZZ_BIN}" + log "INFO" "Corpus dir: ${CORPUS_DIR}" + log "INFO" "Crashes dir: ${CRASHES_DIR}" + log "INFO" "Time per target: ${TIME_PER_TARGET}s" + log "INFO" "RSS limit: ${RSS_LIMIT_MB}MB" + + local all_targets + all_targets=$(get_all_targets) + local targets + targets=$(filter_targets "$all_targets") + if [[ -z "$targets" ]]; then + log "ERROR" "No matching fuzz targets found" + exit 1 + fi + local target_count + target_count=$(echo "$targets" | wc -l) + + log "INFO" "Found ${target_count} fuzz target(s)" + + if $DRY_RUN; then + log "INFO" "DRY RUN — targets that would be fuzzed:" + echo "$targets" + exit 0 + fi + + local cycle=0 + while true; do + cycle=$((cycle + 1)) + log "INFO" "=== Starting cycle ${cycle} (${target_count} targets × ${TIME_PER_TARGET}s) ===" + + # Snapshot crash count before this cycle + local crashes_before + crashes_before=$(find "$CRASHES_DIR" -name 'crash-*' -o -name 'timeout-*' -o -name 'oom-*' 2>/dev/null | wc -l) + + # Shuffle targets each cycle for variety + local shuffled + shuffled=$(echo "$targets" | shuffle_lines) + + while IFS= read -r target; do + [[ -z "$target" ]] && 
continue + run_target "$target" + done <<< "$shuffled" + + # Cycle summary + local total_corpus + total_corpus=$(du -sh "$CORPUS_DIR" 2>/dev/null | cut -f1) + local total_crashes + total_crashes=$(find "$CRASHES_DIR" -name 'crash-*' -o -name 'timeout-*' -o -name 'oom-*' 2>/dev/null | wc -l) + local new_crashes=$((total_crashes - crashes_before)) + log "INFO" "=== Cycle ${cycle} complete: total corpus=${total_corpus}, new crashes=${new_crashes}, total crashes=${total_crashes} ===" + + if $SINGLE_CYCLE; then + if [[ "$new_crashes" -gt 0 ]]; then + log "WARN" "Single-cycle mode — exiting with ${new_crashes} new crash(es) found" + exit 1 + fi + log "INFO" "Single-cycle mode — exiting" + break + fi + + # Brief pause between cycles + log "INFO" "Sleeping 60s before next cycle..." + sleep 60 + done +} + +main diff --git a/contrib/fuzz/seed_corpus_from_chain.py b/contrib/fuzz/seed_corpus_from_chain.py new file mode 100755 index 000000000000..7ded936df481 --- /dev/null +++ b/contrib/fuzz/seed_corpus_from_chain.py @@ -0,0 +1,477 @@ +#!/usr/bin/env python3 +# Copyright (c) 2026 The Dash Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Extract seed corpus inputs from a running Dash node for fuzz testing. + +Connects to a local dashd via RPC and extracts real-world serialized data +(transactions, blocks, special transactions, governance objects, etc.) +into fuzzer-consumable corpus files. 
+ +Usage: + ./seed_corpus_from_chain.py --output-dir /path/to/corpus [options] + +Requirements: + - Running dashd with RPC enabled + - python-bitcoinrpc or compatible RPC library (or uses subprocess + dash-cli) +""" + +import argparse +import hashlib +import json +import subprocess +import sys +from pathlib import Path + + +def dash_cli(*args, datadir=None): + """Call dash-cli and return the result.""" + cmd = ["dash-cli"] + if datadir: + cmd.append(f"-datadir={datadir}") + cmd.extend(args) + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True, timeout=30) + return result.stdout.strip() + except (subprocess.CalledProcessError, FileNotFoundError, subprocess.TimeoutExpired) as e: + print(f"WARNING: dash-cli {' '.join(args)} failed: {e}", file=sys.stderr) + return None + + +def save_corpus_input(output_dir, target_name, data_hex): + """Save a hex-encoded blob as a corpus input file.""" + target_dir = output_dir / target_name + target_dir.mkdir(parents=True, exist_ok=True) + + try: + raw_bytes = bytes.fromhex(data_hex) + except ValueError: + print(f"WARNING: Invalid hex data for target {target_name}, skipping", file=sys.stderr) + return False + + filename = hashlib.sha256(raw_bytes).hexdigest()[:16] + filepath = target_dir / filename + + if not filepath.exists(): + filepath.write_bytes(raw_bytes) + return True + return False + + +def read_compact_size(raw, offset): + """Decode a CompactSize integer from raw bytes at offset.""" + if offset >= len(raw): + raise ValueError("truncated CompactSize") + + first = raw[offset] + offset += 1 + if first < 253: + return first, offset + if first == 253: + if offset + 2 > len(raw): + raise ValueError("truncated CompactSize (uint16)") + return int.from_bytes(raw[offset:offset + 2], byteorder="little"), offset + 2 + if first == 254: + if offset + 4 > len(raw): + raise ValueError("truncated CompactSize (uint32)") + return int.from_bytes(raw[offset:offset + 4], byteorder="little"), offset + 4 + if offset + 8 > 
len(raw): + raise ValueError("truncated CompactSize (uint64)") + return int.from_bytes(raw[offset:offset + 8], byteorder="little"), offset + 8 + + +def extract_extra_payload_hex(raw_tx_hex, extra_payload_size): + """Extract extra payload bytes by parsing a raw special transaction.""" + try: + raw_tx = bytes.fromhex(raw_tx_hex) + except ValueError: + return None, "raw transaction is not valid hex" + + if extra_payload_size <= 0: + return None, "extraPayloadSize must be > 0" + + try: + offset = 0 + if len(raw_tx) < 4: + return None, "raw transaction too short for nVersion/nType" + + n32bit_version = int.from_bytes(raw_tx[offset:offset + 4], byteorder="little") + n_version = n32bit_version & 0xFFFF + n_type = (n32bit_version >> 16) & 0xFFFF + offset += 4 + + if n_version < 3 or n_type == 0: + return None, f"transaction is not a special tx (version={n_version}, type={n_type})" + + vin_count, offset = read_compact_size(raw_tx, offset) + for _ in range(vin_count): + # CTxIn: prevout hash (32), prevout index (4), scriptSig, sequence (4) + if offset + 36 > len(raw_tx): + return None, "truncated tx input prevout" + offset += 36 + + script_len, offset = read_compact_size(raw_tx, offset) + if offset + script_len + 4 > len(raw_tx): + return None, "truncated tx input scriptSig/sequence" + offset += script_len + 4 + + vout_count, offset = read_compact_size(raw_tx, offset) + for _ in range(vout_count): + # CTxOut: amount (8), scriptPubKey + if offset + 8 > len(raw_tx): + return None, "truncated tx output amount" + offset += 8 + + script_len, offset = read_compact_size(raw_tx, offset) + if offset + script_len > len(raw_tx): + return None, "truncated tx output scriptPubKey" + offset += script_len + + if offset + 4 > len(raw_tx): + return None, "truncated nLockTime" + offset += 4 + + payload_len, offset = read_compact_size(raw_tx, offset) + if payload_len != extra_payload_size: + return None, f"extra payload size mismatch (expected {extra_payload_size}, parsed {payload_len})" + if 
offset + payload_len > len(raw_tx): + return None, "truncated extra payload" + + payload = raw_tx[offset:offset + payload_len] + offset += payload_len + if offset != len(raw_tx): + return None, f"unexpected trailing bytes after payload ({len(raw_tx) - offset} bytes)" + + return payload.hex(), None + except ValueError as e: + return None, str(e) + + +def extract_blocks(output_dir, count=20, datadir=None): + """Extract recent blocks as corpus inputs.""" + print(f"Extracting {count} recent blocks...") + height_str = dash_cli("getblockcount", datadir=datadir) + if not height_str: + return 0 + + height = int(height_str) + saved = 0 + + for h in range(max(0, height - count), height + 1): + block_hash = dash_cli("getblockhash", str(h), datadir=datadir) + if not block_hash: + continue + + # Get serialized block + block_hex = dash_cli("getblock", block_hash, "0", datadir=datadir) + if block_hex: + if save_corpus_input(output_dir, "block_deserialize", block_hex): + saved += 1 + if save_corpus_input(output_dir, "block", block_hex): + saved += 1 + + print(f" Saved {saved} block corpus inputs") + return saved + + +def extract_special_txs(output_dir, count=100, datadir=None): + """Extract special transactions (ProTx, etc.) 
from recent blocks.""" + print(f"Scanning {count} recent blocks for special transactions...") + height_str = dash_cli("getblockcount", datadir=datadir) + if not height_str: + return 0 + + height = int(height_str) + saved = 0 + + # Map special tx types to fuzz target names + type_map = { + 1: "dash_proreg_tx", # ProRegTx + 2: "dash_proupserv_tx", # ProUpServTx + 3: "dash_proupreg_tx", # ProUpRegTx + 4: "dash_prouprev_tx", # ProUpRevTx + 5: "dash_cbtx", # CbTx (coinbase) + 6: "dash_final_commitment_tx_payload", # Quorum commitment + 7: "dash_mnhf_tx_payload", # MN HF signal + 8: "dash_asset_lock_payload", # Asset Lock + 9: "dash_asset_unlock_payload", # Asset Unlock + } + + for h in range(max(0, height - count), height + 1): + block_hash = dash_cli("getblockhash", str(h), datadir=datadir) + if not block_hash: + continue + + block_json = dash_cli("getblock", block_hash, "2", datadir=datadir) + if not block_json: + continue + + try: + block = json.loads(block_json) + except json.JSONDecodeError: + continue + + for tx in block.get("tx", []): + tx_type = tx.get("type", 0) + if tx_type == 0: + continue + + # Get raw transaction + txid = tx.get("txid", "") + raw_tx = dash_cli("getrawtransaction", txid, datadir=datadir) + if not raw_tx: + continue + + # Save full transaction + if save_corpus_input(output_dir, "decode_tx", raw_tx): + saved += 1 + + # Extract special payload if we know the target + extra_payload_size = tx.get("extraPayloadSize", 0) + try: + extra_payload_size = int(extra_payload_size) + except (TypeError, ValueError): + extra_payload_size = 0 + + if extra_payload_size > 0 and tx_type in type_map: + payload_hex, err = extract_extra_payload_hex(raw_tx, extra_payload_size) + if not payload_hex: + print( + f"WARNING: Skipping special payload for tx {txid}: {err}", + file=sys.stderr, + ) + continue + + target = type_map[tx_type] + # Save payload bytes for both deserialize and roundtrip variants. 
+ for suffix in ["_deserialize", "_roundtrip"]: + if save_corpus_input(output_dir, f"{target}{suffix}", payload_hex): + saved += 1 + + print(f" Saved {saved} special transaction corpus inputs") + return saved + + +def extract_governance_objects(output_dir, datadir=None): + """Extract governance objects (proposals, triggers).""" + print("Extracting governance objects...") + result = dash_cli("gobject", "list", "all", datadir=datadir) + if not result: + return 0 + + saved = 0 + try: + objects = json.loads(result) + for _obj_hash, obj_data in objects.items(): + data_hex = obj_data.get("DataHex", "") + if data_hex: + if save_corpus_input(output_dir, "dash_governance_object_deserialize", data_hex): + saved += 1 + if save_corpus_input(output_dir, "dash_governance_object_roundtrip", data_hex): + saved += 1 + except (json.JSONDecodeError, AttributeError): + pass + + print(f" Saved {saved} governance corpus inputs") + return saved + + +def extract_masternode_list(output_dir, datadir=None): + """Extract masternode list entries.""" + print("Extracting masternode list data...") + result = dash_cli("protx", "list", "registered", "true", datadir=datadir) + if not result: + return 0 + + saved = 0 + try: + mn_list = json.loads(result) + for mn in mn_list: + protx_hash = mn.get("proTxHash", "") + if not protx_hash: + continue + + raw_tx = dash_cli("getrawtransaction", protx_hash, datadir=datadir) + if not raw_tx: + continue + + # Save full raw tx for full-transaction targets + if save_corpus_input(output_dir, "decode_tx", raw_tx): + saved += 1 + + # Extract the special payload for payload-specific targets + # ProRegTx type is 1, get extraPayloadSize from verbose tx + verbose_tx = dash_cli("getrawtransaction", protx_hash, "true", datadir=datadir) + if not verbose_tx: + continue + try: + tx_info = json.loads(verbose_tx) + except json.JSONDecodeError: + continue + + extra_payload_size = tx_info.get("extraPayloadSize", 0) + try: + extra_payload_size = int(extra_payload_size) + except 
(TypeError, ValueError): + extra_payload_size = 0 + + if extra_payload_size > 0: + payload_hex, err = extract_extra_payload_hex(raw_tx, extra_payload_size) + if payload_hex: + for target in ["dash_proreg_tx_deserialize", "dash_proreg_tx_roundtrip"]: + if save_corpus_input(output_dir, target, payload_hex): + saved += 1 + else: + print(f"WARNING: Could not extract payload from protx {protx_hash}: {err}", file=sys.stderr) + except (json.JSONDecodeError, AttributeError): + pass + + print(f" Saved {saved} masternode corpus inputs") + return saved + + +def extract_quorum_info(output_dir, datadir=None): + """Extract quorum-related data from the chain. + + Note: quorum snapshot deserialize targets expect binary-serialized + CQuorumSnapshot data, not JSON. We extract final commitment transactions + from blocks instead, which are already captured by extract_special_txs() + for type 6 (TRANSACTION_QUORUM_COMMITMENT). This function focuses on + extracting quorum memberof data as raw bytes for other quorum targets. 
+ """ + print("Extracting quorum data...") + result = dash_cli("quorum", "list", datadir=datadir) + if not result: + return 0 + + saved = 0 + try: + quorum_list = json.loads(result) + for qtype, hashes in quorum_list.items(): + for qhash in hashes[:5]: # Limit per type + # Get the quorum commitment transaction via selectquorum + # which gives us the quorumHash we can look up in blocks + qinfo_str = dash_cli("quorum", "info", qtype, qhash, datadir=datadir) + if not qinfo_str: + continue + try: + qinfo = json.loads(qinfo_str) + except json.JSONDecodeError: + continue + # Extract the commitment tx if available + mining_hash = qinfo.get("minedBlock", "") + if mining_hash: + block_hex = dash_cli("getblock", mining_hash, "0", datadir=datadir) + if block_hex and save_corpus_input(output_dir, "block_deserialize", block_hex): + saved += 1 + except (json.JSONDecodeError, AttributeError): + pass + + print(f" Saved {saved} quorum corpus inputs") + return saved + + +# +# These Dash-specific target names are forward-looking: corresponding fuzz targets +# are planned for a future PR. We pre-generate seeds now so coverage is ready as +# soon as those targets land. 
+def create_synthetic_seeds(output_dir): + """Create minimal synthetic seed inputs for targets without chain data.""" + print("Creating synthetic seed inputs...") + saved = 0 + + # Targets that need synthetic seeds (serialized structs with known formats) + synthetic_seeds = { + # CoinJoin messages — minimal valid-ish payloads + "dash_coinjoin_accept_deserialize": [ + "00000000" + "00" * 4, # nDenom(4) + txCollateral + ], + "dash_coinjoin_queue_deserialize": [ + "00000000" + "00" * 48 + "00" * 96 + "0000000000000000", # nDenom + proTxHash + vchSig + nTime + ], + "dash_coinjoin_status_update_deserialize": [ + "00000000" + "00000000" + "00000000", # nSessionID + nState + nStatusUpdate + ], + # LLMQ messages + "dash_recovered_sig_deserialize": [ + "64" + "00" * 32 + "00" * 32 + "00" * 96, # llmqType + quorumHash + id + sig + ], + "dash_sig_ses_ann_deserialize": [ + "64" + "00" * 32 + "00000000" + "00" * 32, # llmqType + quorumHash + nSessionId + id + ], + "dash_sig_share_deserialize": [ + "64" + "00" * 32 + "00000000" + "00" * 32 + "0000" + "00" * 96, + ], + # MNAuth + "dash_mnauth_deserialize": [ + "00" * 32 + "00" * 32 + "00" * 96, # proRegTxHash + signChallenge + sig + ], + # DKG messages + "dash_dkg_complaint_deserialize": [ + "64" + "00" * 32 + "00" * 32 + "0000" + "00", # minimal + ], + } + + for target, seeds in synthetic_seeds.items(): + for seed_hex in seeds: + if save_corpus_input(output_dir, target, seed_hex): + saved += 1 + # Also save roundtrip variant + roundtrip_target = target.replace("_deserialize", "_roundtrip") + if save_corpus_input(output_dir, roundtrip_target, seed_hex): + saved += 1 + + print(f" Created {saved} synthetic seed inputs") + return saved + + +def main(): + parser = argparse.ArgumentParser( + description="Extract seed corpus from a running Dash node for fuzz testing" + ) + parser.add_argument( + "--output-dir", "-o", + required=True, + help="Output directory for corpus files" + ) + parser.add_argument( + "--datadir", + help="Dash data 
directory (passed to dash-cli)" + ) + parser.add_argument( + "--blocks", type=int, default=100, + help="Number of recent blocks to scan (default: 100)" + ) + parser.add_argument( + "--synthetic-only", + action="store_true", + help="Only generate synthetic seeds (no RPC required)" + ) + args = parser.parse_args() + + output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + total = 0 + + if not args.synthetic_only: + total += extract_blocks(output_dir, count=args.blocks, datadir=args.datadir) + total += extract_special_txs(output_dir, count=args.blocks, datadir=args.datadir) + total += extract_governance_objects(output_dir, datadir=args.datadir) + total += extract_masternode_list(output_dir, datadir=args.datadir) + total += extract_quorum_info(output_dir, datadir=args.datadir) + + total += create_synthetic_seeds(output_dir) + + print(f"\nTotal: {total} corpus inputs saved to {output_dir}") + + # Print summary + print("\nCorpus directory summary:") + for target_dir in sorted(output_dir.iterdir()): + if target_dir.is_dir(): + file_count = len(list(target_dir.iterdir())) + print(f" {target_dir.name}: {file_count} files") + + +if __name__ == "__main__": + main() From 093a849ba48919023d3094f094d10b33e9c701d9 Mon Sep 17 00:00:00 2001 From: PastaClaw Date: Fri, 27 Feb 2026 12:31:12 -0600 Subject: [PATCH 2/9] test(fuzz): add Dash-specific deserialization and roundtrip fuzz targets Add fuzz targets for Dash-specific serializable types: deserialize_dash.cpp: - CQuorumSnapshot, CDeterministicMNList, CDeterministicMNListDiff - CSimplifiedMNList, CSimplifiedMNListDiff, CGovernanceObject - CMasternodeMetaInfo, CBloomFilter, CService, CAddress - CCoinJoinBroadcastTx, CCoinJoinQueue, CCoinJoinStatusUpdate - CCoinJoinEntry/AddScriptSig, CCompactDenominations - Various Dash special TX payloads (ProReg, ProUpServ, ProUpReg, ProUpRev, CbTx, QcTx, MnHfTx, AssetLock, AssetUnlock) roundtrip_dash.cpp: - Roundtrip (serialize-deserialize-reserialize) tests for 
the same types, verifying byte-perfect serialization symmetry --- src/test/fuzz/deserialize_dash.cpp | 309 +++++++++++++++++++++++++++++ src/test/fuzz/roundtrip_dash.cpp | 187 +++++++++++++++++ 2 files changed, 496 insertions(+) create mode 100644 src/test/fuzz/deserialize_dash.cpp create mode 100644 src/test/fuzz/roundtrip_dash.cpp diff --git a/src/test/fuzz/deserialize_dash.cpp b/src/test/fuzz/deserialize_dash.cpp new file mode 100644 index 000000000000..978de29b5f65 --- /dev/null +++ b/src/test/fuzz/deserialize_dash.cpp @@ -0,0 +1,309 @@ +// Copyright (c) 2026 The Dash Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +// Deserialization fuzz targets for Dash-specific types. +// Follows the same pattern as deserialize.cpp for Bitcoin Core types. + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace { + +const BasicTestingSetup* g_setup; + +struct dash_invalid_fuzzing_input_exception : public std::exception { +}; + +template +void DashDeserializeFromFuzzingInput(FuzzBufferType buffer, T& obj, + const std::optional protocol_version = std::nullopt, + const int ser_type = SER_NETWORK) +{ + CDataStream ds(buffer, ser_type, PROTOCOL_VERSION); + if (protocol_version) { + ds.SetVersion(*protocol_version); + } else { + try { + int version; + ds >> version; + ds.SetVersion(version); + } catch (const std::ios_base::failure&) { + throw dash_invalid_fuzzing_input_exception(); + } + } + try { + ds >> obj; + } catch (const std::ios_base::failure&) { + throw dash_invalid_fuzzing_input_exception(); + } + CDataStream sink(ser_type, ds.GetVersion()); + sink << obj; + assert(!sink.empty()); +} + +} // 
namespace + +void initialize_deserialize_dash() +{ + static const auto testing_setup = MakeNoLogFileContext<>(); + g_setup = testing_setup.get(); +} + +#define FUZZ_TARGET_DASH_DESERIALIZE(name, code) \ + FUZZ_TARGET(name, .init = initialize_deserialize_dash) \ + { \ + try { \ + code \ + } catch (const dash_invalid_fuzzing_input_exception&) { \ + } \ + } + +// --- evo/ types: Provider transactions --- + +FUZZ_TARGET_DASH_DESERIALIZE(dash_proreg_tx_deserialize, { + CProRegTx obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_proupserv_tx_deserialize, { + CProUpServTx obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_proupreg_tx_deserialize, { + CProUpRegTx obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_prouprev_tx_deserialize, { + CProUpRevTx obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) + +// --- evo/ types: Asset Lock/Unlock (L1↔L2 bridge) --- + +FUZZ_TARGET_DASH_DESERIALIZE(dash_asset_lock_payload_deserialize, { + CAssetLockPayload obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_asset_unlock_payload_deserialize, { + CAssetUnlockPayload obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) + +// --- evo/ types: Coinbase special payload --- + +FUZZ_TARGET_DASH_DESERIALIZE(dash_cbtx_deserialize, { + CCbTx obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) + +// --- evo/ types: Credit pool --- + +FUZZ_TARGET_DASH_DESERIALIZE(dash_credit_pool_deserialize, { + CCreditPool obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) + +// --- evo/ types: Deterministic masternode --- + +FUZZ_TARGET_DASH_DESERIALIZE(dash_deterministic_mn_deserialize, { + CDeterministicMN obj(0 /* internalId, will be overwritten by deserialization */); + DashDeserializeFromFuzzingInput(buffer, obj); +}) + +// --- evo/ types: Deterministic masternode state --- + 
+FUZZ_TARGET_DASH_DESERIALIZE(dash_dmn_state_deserialize, { + CDeterministicMNState obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_dmn_state_diff_deserialize, { + CDeterministicMNStateDiff obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) + +// --- evo/ types: Simplified MN list --- + +FUZZ_TARGET_DASH_DESERIALIZE(dash_smn_list_entry_deserialize, { + CSimplifiedMNListEntry obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_get_smn_list_diff_deserialize, { + CGetSimplifiedMNListDiff obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_smn_list_diff_deserialize, { + CSimplifiedMNListDiff obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) + +// --- evo/ types: MN auth and hard fork signaling --- + +FUZZ_TARGET_DASH_DESERIALIZE(dash_mnauth_deserialize, { + CMNAuth obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_mnhf_tx_deserialize, { + MNHFTx obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_mnhf_tx_payload_deserialize, { + MNHFTxPayload obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) + +// --- llmq/ types: Quorum commitment --- + +FUZZ_TARGET_DASH_DESERIALIZE(dash_final_commitment_deserialize, { + llmq::CFinalCommitment obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_final_commitment_tx_payload_deserialize, { + llmq::CFinalCommitmentTxPayload obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) + +// --- llmq/ types: DKG messages --- + +FUZZ_TARGET_DASH_DESERIALIZE(dash_dkg_complaint_deserialize, { + llmq::CDKGComplaint obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_dkg_justification_deserialize, { + llmq::CDKGJustification obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) 
+FUZZ_TARGET_DASH_DESERIALIZE(dash_dkg_premature_commitment_deserialize, { + llmq::CDKGPrematureCommitment obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) + +// --- llmq/ types: Signing --- + +FUZZ_TARGET_DASH_DESERIALIZE(dash_recovered_sig_deserialize, { + llmq::CRecoveredSig obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_sig_share_deserialize, { + llmq::CSigShare obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_sig_ses_ann_deserialize, { + llmq::CSigSesAnn obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_sig_shares_inv_deserialize, { + llmq::CSigSharesInv obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_batched_sig_shares_deserialize, { + llmq::CBatchedSigShares obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) + +// --- llmq/ types: Quorum data and rotation --- + +FUZZ_TARGET_DASH_DESERIALIZE(dash_quorum_data_request_deserialize, { + llmq::CQuorumDataRequest obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_get_quorum_rotation_info_deserialize, { + llmq::CGetQuorumRotationInfo obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_quorum_snapshot_deserialize, { + llmq::CQuorumSnapshot obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) + +// --- governance/ types --- + +FUZZ_TARGET_DASH_DESERIALIZE(dash_governance_object_common_deserialize, { + Governance::Object obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_governance_object_deserialize, { + CGovernanceObject obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_governance_vote_deserialize, { + CGovernanceVote obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_vote_instance_deserialize, { + vote_instance_t obj; + 
DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_vote_rec_deserialize, { + vote_rec_t obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_governance_vote_file_deserialize, { + CGovernanceObjectVoteFile obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) + +// --- bls/ types --- + +FUZZ_TARGET_DASH_DESERIALIZE(dash_bls_ies_encrypted_blob_deserialize, { + CBLSIESEncryptedBlob obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_bls_ies_multi_recipient_blobs_deserialize, { + CBLSIESMultiRecipientBlobs obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) + +// --- coinjoin/ types --- + +FUZZ_TARGET_DASH_DESERIALIZE(dash_coinjoin_status_update_deserialize, { + CCoinJoinStatusUpdate obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_coinjoin_accept_deserialize, { + CCoinJoinAccept obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_coinjoin_entry_deserialize, { + CCoinJoinEntry obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_coinjoin_queue_deserialize, { + CCoinJoinQueue obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_DESERIALIZE(dash_coinjoin_broadcast_tx_deserialize, { + CCoinJoinBroadcastTx obj; + DashDeserializeFromFuzzingInput(buffer, obj); +}) diff --git a/src/test/fuzz/roundtrip_dash.cpp b/src/test/fuzz/roundtrip_dash.cpp new file mode 100644 index 000000000000..bc5928410aea --- /dev/null +++ b/src/test/fuzz/roundtrip_dash.cpp @@ -0,0 +1,187 @@ +// Copyright (c) 2026 The Dash Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +// Roundtrip (deserialize -> serialize -> deserialize -> serialize) fuzz targets +// for Dash-specific types. 
+ +#include +#include + +#include +#include + +// evo/ types +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// llmq/ types +#include +#include +#include +#include +#include +#include + +// governance/ types +#include + +// bls/ types +#include + +// coinjoin/ types +#include + +#include +#include +#include + +namespace { + +const BasicTestingSetup* g_setup; + +struct dash_invalid_fuzzing_input_exception : public std::exception { +}; + +template +void DashRoundtripFromFuzzingInput(FuzzBufferType buffer, T& obj) +{ + CDataStream ds(buffer, SER_NETWORK, INIT_PROTO_VERSION); + try { + int version; + ds >> version; + ds.SetVersion(version); + } catch (const std::ios_base::failure&) { + throw dash_invalid_fuzzing_input_exception(); + } + + try { + ds >> obj; + } catch (const std::ios_base::failure&) { + throw dash_invalid_fuzzing_input_exception(); + } + + CDataStream sink(SER_NETWORK, ds.GetVersion()); + sink << obj; + assert(!sink.empty()); + + CDataStream ds2(SER_NETWORK, ds.GetVersion()); + ds2 << obj; + + T obj2 = obj; + CDataStream ds3(Span{ds2}, SER_NETWORK, ds.GetVersion()); + try { + ds3 >> obj2; + } catch (const std::ios_base::failure&) { + assert(false); + } + + CDataStream ds4(SER_NETWORK, ds.GetVersion()); + ds4 << obj2; + + assert(MakeByteSpan(ds2) == MakeByteSpan(ds4)); +} + +template +void DashRoundtripFromFuzzingInput(FuzzBufferType buffer) +{ + T obj; + DashRoundtripFromFuzzingInput(buffer, obj); +} + +} // namespace + +void initialize_roundtrip_dash() +{ + static const auto testing_setup = MakeNoLogFileContext<>(); + g_setup = testing_setup.get(); +} + +#define FUZZ_TARGET_DASH_ROUNDTRIP(name, code) \ + FUZZ_TARGET(name, .init = initialize_roundtrip_dash) \ + { \ + try { \ + code \ + } catch (const dash_invalid_fuzzing_input_exception&) { \ + } \ + } + +FUZZ_TARGET_DASH_ROUNDTRIP(dash_proreg_tx_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) 
+FUZZ_TARGET_DASH_ROUNDTRIP(dash_proupserv_tx_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_proupreg_tx_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_prouprev_tx_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) + +FUZZ_TARGET_DASH_ROUNDTRIP(dash_asset_lock_payload_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_asset_unlock_payload_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) + +FUZZ_TARGET_DASH_ROUNDTRIP(dash_cbtx_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_credit_pool_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) + +FUZZ_TARGET_DASH_ROUNDTRIP(dash_deterministic_mn_roundtrip, { + CDeterministicMN obj(0 /* internalId, overwritten by deserialization */); + DashRoundtripFromFuzzingInput(buffer, obj); +}) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_dmn_state_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_dmn_state_diff_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) + +FUZZ_TARGET_DASH_ROUNDTRIP(dash_smn_list_entry_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_get_smn_list_diff_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_smn_list_diff_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) + +FUZZ_TARGET_DASH_ROUNDTRIP(dash_mnauth_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_mnhf_tx_payload_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) + +FUZZ_TARGET_DASH_ROUNDTRIP(dash_final_commitment_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_final_commitment_tx_payload_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) + +FUZZ_TARGET_DASH_ROUNDTRIP(dash_dkg_complaint_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) 
+FUZZ_TARGET_DASH_ROUNDTRIP(dash_dkg_justification_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_dkg_premature_commitment_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) + +FUZZ_TARGET_DASH_ROUNDTRIP(dash_recovered_sig_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_sig_share_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_sig_ses_ann_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_sig_shares_inv_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_batched_sig_shares_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) + +FUZZ_TARGET_DASH_ROUNDTRIP(dash_quorum_data_request_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_get_quorum_rotation_info_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_quorum_snapshot_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) + +FUZZ_TARGET_DASH_ROUNDTRIP(dash_governance_object_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) + +FUZZ_TARGET_DASH_ROUNDTRIP(dash_bls_ies_encrypted_blob_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_bls_ies_multi_recipient_blobs_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) + +FUZZ_TARGET_DASH_ROUNDTRIP(dash_coinjoin_status_update_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_coinjoin_accept_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_coinjoin_entry_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_coinjoin_queue_roundtrip, { DashRoundtripFromFuzzingInput(buffer); }) +FUZZ_TARGET_DASH_ROUNDTRIP(dash_coinjoin_broadcast_tx_roundtrip, + { DashRoundtripFromFuzzingInput(buffer); }) From 6178538f006685e1433c95fe7f26b90f304e608b 
Mon Sep 17 00:00:00 2001 From: PastaClaw Date: Fri, 27 Feb 2026 12:31:17 -0600 Subject: [PATCH 3/9] test(fuzz): add BLS cryptographic operations fuzz target Fuzz target exercising Dash's BLS signature scheme: - Key generation, public key derivation, key aggregation - Signing, signature aggregation, signature verification - IES (Integrated Encryption Scheme) encrypt/decrypt roundtrip - Threshold signing and recovery - Serialization/deserialization of keys and signatures - Edge cases: zero-length messages, invalid keys, self-aggregation --- src/test/fuzz/bls_operations.cpp | 297 +++++++++++++++++++++++++++++++ 1 file changed, 297 insertions(+) create mode 100644 src/test/fuzz/bls_operations.cpp diff --git a/src/test/fuzz/bls_operations.cpp b/src/test/fuzz/bls_operations.cpp new file mode 100644 index 000000000000..3ab6f4b9591d --- /dev/null +++ b/src/test/fuzz/bls_operations.cpp @@ -0,0 +1,297 @@ +// Copyright (c) 2026 The Dash Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace { + +//! Build a CBLSSecretKey from fuzzed bytes. Returns invalid key on bad input. +CBLSSecretKey MakeSecretKey(FuzzedDataProvider& fuzzed_data_provider) +{ + auto bytes = fuzzed_data_provider.ConsumeBytes(CBLSSecretKey::SerSize); + bytes.resize(CBLSSecretKey::SerSize); + return CBLSSecretKey(bytes); +} + +//! Build a CBLSPublicKey from fuzzed bytes via deserialization. +CBLSPublicKey MakePublicKey(FuzzedDataProvider& fuzzed_data_provider) +{ + auto bytes = fuzzed_data_provider.ConsumeBytes(CBLSPublicKey::SerSize); + bytes.resize(CBLSPublicKey::SerSize); + CBLSPublicKey pk; + pk.SetBytes(bytes, fuzzed_data_provider.ConsumeBool()); + return pk; +} + +//! Build a CBLSSignature from fuzzed bytes via deserialization. 
+CBLSSignature MakeSignature(FuzzedDataProvider& fuzzed_data_provider) +{ + auto bytes = fuzzed_data_provider.ConsumeBytes(CBLSSignature::SerSize); + bytes.resize(CBLSSignature::SerSize); + CBLSSignature sig; + sig.SetBytes(bytes, fuzzed_data_provider.ConsumeBool()); + return sig; +} + +uint256 MakeHash(FuzzedDataProvider& fuzzed_data_provider) +{ + auto bytes = fuzzed_data_provider.ConsumeBytes(32); + bytes.resize(32); + uint256 h; + memcpy(h.begin(), bytes.data(), 32); + return h; +} + +} // namespace + +void initialize_bls_operations() { BLSInit(); } + +FUZZ_TARGET(bls_operations, .init = initialize_bls_operations) +{ + FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); + + // Test both legacy and basic BLS schemes. + // Intentionally mutating this global: each fuzz run is an isolated process, + // so there is no cross-run state leakage. + const bool use_legacy = fuzzed_data_provider.ConsumeBool(); + bls::bls_legacy_scheme.store(use_legacy); + + LIMITED_WHILE(fuzzed_data_provider.remaining_bytes() > 0, 32) + { + switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 10)) { + case 0: { + // Key generation from fuzzed bytes + public key derivation + CBLSSecretKey sk = MakeSecretKey(fuzzed_data_provider); + if (sk.IsValid()) { + CBLSPublicKey pk = sk.GetPublicKey(); + (void)pk.IsValid(); + (void)pk.GetHash(); + (void)pk.ToString(); + } + break; + } + case 1: { + // Signing + verification with fuzzed data + CBLSSecretKey sk = MakeSecretKey(fuzzed_data_provider); + uint256 hash = MakeHash(fuzzed_data_provider); + const bool legacy_sign = fuzzed_data_provider.ConsumeBool(); + + if (sk.IsValid()) { + CBLSSignature sig = sk.Sign(hash, legacy_sign); + if (sig.IsValid()) { + CBLSPublicKey pk = sk.GetPublicKey(); + // Should verify with matching scheme + (void)sig.VerifyInsecure(pk, hash, legacy_sign); + // May or may not verify with opposite scheme + (void)sig.VerifyInsecure(pk, hash, !legacy_sign); + } + } + break; + } + case 2: { + // Verification 
with completely fuzzed key/sig/hash + CBLSPublicKey pk = MakePublicKey(fuzzed_data_provider); + CBLSSignature sig = MakeSignature(fuzzed_data_provider); + uint256 hash = MakeHash(fuzzed_data_provider); + const bool legacy_verify = fuzzed_data_provider.ConsumeBool(); + + (void)sig.VerifyInsecure(pk, hash, legacy_verify); + break; + } + case 3: { + // Public key aggregation (filter to valid keys — impl access on invalid is UB) + const size_t count = fuzzed_data_provider.ConsumeIntegralInRange(0, 5); + std::vector pks; + for (size_t i = 0; i < count; i++) { + auto pk = MakePublicKey(fuzzed_data_provider); + if (pk.IsValid()) pks.push_back(pk); + } + (void)CBLSPublicKey::AggregateInsecure(pks); + break; + } + case 4: { + // Signature aggregation (filter to valid sigs) + const size_t count = fuzzed_data_provider.ConsumeIntegralInRange(0, 5); + std::vector sigs; + for (size_t i = 0; i < count; i++) { + auto sig = MakeSignature(fuzzed_data_provider); + if (sig.IsValid()) sigs.push_back(sig); + } + (void)CBLSSignature::AggregateInsecure(sigs); + break; + } + case 5: { + // Secret key aggregation (filter to valid keys) + const size_t count = fuzzed_data_provider.ConsumeIntegralInRange(0, 5); + std::vector sks; + for (size_t i = 0; i < count; i++) { + auto sk = MakeSecretKey(fuzzed_data_provider); + if (sk.IsValid()) sks.push_back(sk); + } + (void)CBLSSecretKey::AggregateInsecure(sks); + break; + } + case 6: { + // Threshold secret key share — SecretKeyShare validates inputs internally + const size_t threshold = fuzzed_data_provider.ConsumeIntegralInRange(1, 4); + std::vector msk; + for (size_t i = 0; i < threshold; i++) { + msk.push_back(MakeSecretKey(fuzzed_data_provider)); + } + CBLSId id(MakeHash(fuzzed_data_provider)); + CBLSSecretKey share; + (void)share.SecretKeyShare(msk, id); + break; + } + case 7: { + // Threshold public key share — PublicKeyShare validates inputs internally + const size_t threshold = fuzzed_data_provider.ConsumeIntegralInRange(1, 4); + std::vector 
mpk; + for (size_t i = 0; i < threshold; i++) { + mpk.push_back(MakePublicKey(fuzzed_data_provider)); + } + CBLSId id(MakeHash(fuzzed_data_provider)); + CBLSPublicKey share; + (void)share.PublicKeyShare(mpk, id); + break; + } + case 8: { + // Signature recovery from shares — Recover validates inputs internally + const size_t count = fuzzed_data_provider.ConsumeIntegralInRange(0, 4); + std::vector sigs; + std::vector ids; + for (size_t i = 0; i < count; i++) { + sigs.push_back(MakeSignature(fuzzed_data_provider)); + ids.emplace_back(MakeHash(fuzzed_data_provider)); + } + CBLSSignature recovered; + (void)recovered.Recover(sigs, ids); + break; + } + case 9: { + // DH key exchange + CBLSSecretKey sk = MakeSecretKey(fuzzed_data_provider); + CBLSPublicKey pk = MakePublicKey(fuzzed_data_provider); + CBLSPublicKey result; + (void)result.DHKeyExchange(sk, pk); + break; + } + case 10: { + // Aggregated signature verification + const size_t count = fuzzed_data_provider.ConsumeIntegralInRange(1, 4); + std::vector pks; + std::vector hashes; + for (size_t i = 0; i < count; i++) { + auto pk = MakePublicKey(fuzzed_data_provider); + uint256 h = MakeHash(fuzzed_data_provider); + if (pk.IsValid()) { + pks.push_back(pk); + hashes.push_back(h); + } + } + if (!pks.empty()) { + CBLSSignature sig = MakeSignature(fuzzed_data_provider); + // VerifyInsecureAggregated asserts non-empty + equal sizes + (void)sig.VerifyInsecureAggregated(pks, hashes); + // VerifySecureAggregated accesses pk.impl directly + uint256 single_hash = MakeHash(fuzzed_data_provider); + (void)sig.VerifySecureAggregated(pks, single_hash); + } + break; + } + } // switch + } // while +} + +FUZZ_TARGET(bls_ies, .init = initialize_bls_operations) +{ + FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); + + // Intentionally mutating global; see bls_operations target comment. 
+ bls::bls_legacy_scheme.store(fuzzed_data_provider.ConsumeBool()); + + switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 2)) { + case 0: { + // CBLSIESEncryptedBlob deserialization + decrypt with fuzzed key + CBLSIESEncryptedBlob blob; + + // Build blob from fuzzed data + blob.ephemeralPubKey = MakePublicKey(fuzzed_data_provider); + auto iv_bytes = fuzzed_data_provider.ConsumeBytes(32); + iv_bytes.resize(32); + memcpy(blob.ivSeed.begin(), iv_bytes.data(), 32); + blob.data = fuzzed_data_provider.ConsumeBytes(fuzzed_data_provider.ConsumeIntegralInRange(0, 256)); + + (void)blob.IsValid(); + + size_t idx = fuzzed_data_provider.ConsumeIntegralInRange(0, 16); + (void)blob.GetIV(idx); + + CBLSSecretKey sk = MakeSecretKey(fuzzed_data_provider); + CDataStream ds(SER_NETWORK, PROTOCOL_VERSION); + (void)blob.Decrypt(idx, sk, ds); + break; + } + case 1: { + // CBLSIESMultiRecipientBlobs decrypt with fuzzed data + CBLSIESMultiRecipientBlobs multi; + multi.ephemeralPubKey = MakePublicKey(fuzzed_data_provider); + + auto iv_bytes = fuzzed_data_provider.ConsumeBytes(32); + iv_bytes.resize(32); + memcpy(multi.ivSeed.begin(), iv_bytes.data(), 32); + + const size_t blob_count = fuzzed_data_provider.ConsumeIntegralInRange(0, 8); + multi.blobs.resize(blob_count); + for (size_t i = 0; i < blob_count; i++) { + multi.blobs[i] = fuzzed_data_provider.ConsumeBytes( + fuzzed_data_provider.ConsumeIntegralInRange(0, 128)); + } + + // Try to decrypt each blob + CBLSSecretKey sk = MakeSecretKey(fuzzed_data_provider); + for (size_t i = 0; i < blob_count; i++) { + CBLSIESMultiRecipientBlobs::Blob result; + (void)multi.Decrypt(i, sk, result); + } + break; + } + case 2: { + // Roundtrip: encrypt then decrypt with matching keys + CBLSSecretKey recipient_sk = MakeSecretKey(fuzzed_data_provider); + if (!recipient_sk.IsValid()) break; + + CBLSPublicKey recipient_pk = recipient_sk.GetPublicKey(); + if (!recipient_pk.IsValid()) break; + + const size_t data_size = 
fuzzed_data_provider.ConsumeIntegralInRange(0, 128); + auto plaintext = fuzzed_data_provider.ConsumeBytes(data_size); + plaintext.resize(data_size); + + // Encrypt + CBLSIESMultiRecipientBlobs multi; + multi.InitEncrypt(1); + bool encrypted = multi.Encrypt(0, recipient_pk, plaintext); + if (encrypted) { + // Decrypt + CBLSIESMultiRecipientBlobs::Blob decrypted; + bool decrypted_ok = multi.Decrypt(0, recipient_sk, decrypted); + if (decrypted_ok) { + assert(decrypted == plaintext); + } + } + break; + } + } // switch +} From e5395e6fe7a1ba8094f5b383089414ddef8a1b44 Mon Sep 17 00:00:00 2001 From: PastaClaw Date: Fri, 27 Feb 2026 12:31:22 -0600 Subject: [PATCH 4/9] test(fuzz): add CoinJoin protocol fuzz targets Fuzz targets for CoinJoin mixing protocol components: - CCoinJoinBroadcastTx: broadcast message validation and relay - CCoinJoinQueue: queue entry parsing and signature checks - CCoinJoinStatusUpdate: status message deserialization - CCoinJoinEntry/AddScriptSig: entry and script handling - CCompactDenominations: denomination encoding/decoding - Message integrity checks and edge case handling --- src/test/fuzz/coinjoin.cpp | 176 +++++++++++++++++++++++++++++++++++++ 1 file changed, 176 insertions(+) create mode 100644 src/test/fuzz/coinjoin.cpp diff --git a/src/test/fuzz/coinjoin.cpp b/src/test/fuzz/coinjoin.cpp new file mode 100644 index 000000000000..ee88b6b218a5 --- /dev/null +++ b/src/test/fuzz/coinjoin.cpp @@ -0,0 +1,176 @@ +// Copyright (c) 2026-present The Dash Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+namespace {
+void initialize_coinjoin() { SelectParams(CBaseChainParams::REGTEST); }
+} // namespace
+
+// Fuzz the CoinJoin denomination helper functions with arbitrary amounts
+FUZZ_TARGET(coinjoin_denominations)
+{
+    FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
+    // Test AmountToDenomination / DenominationToAmount roundtrip
+    const CAmount amount = fuzzed_data_provider.ConsumeIntegral<CAmount>();
+    const int denom = CoinJoin::AmountToDenomination(amount);
+    if (denom > 0) {
+        // Valid denomination — roundtrip must be consistent
+        assert(CoinJoin::DenominationToAmount(denom) == amount);
+        assert(CoinJoin::IsDenominatedAmount(amount));
+        assert(CoinJoin::IsValidDenomination(denom));
+    }
+
+    // Test DenominationToAmount with fuzzed denom values
+    const int fuzzed_denom = fuzzed_data_provider.ConsumeIntegral<int>();
+    const CAmount denom_amount = CoinJoin::DenominationToAmount(fuzzed_denom);
+    if (denom_amount > 0) {
+        assert(CoinJoin::IsDenominatedAmount(denom_amount));
+        assert(CoinJoin::AmountToDenomination(denom_amount) == fuzzed_denom);
+    }
+
+    // Test collateral amount checks
+    const CAmount collateral_amount = fuzzed_data_provider.ConsumeIntegral<CAmount>();
+    (void)CoinJoin::IsCollateralAmount(collateral_amount);
+
+    // Test priority calculation
+    (void)CoinJoin::CalculateAmountPriority(amount);
+    (void)CoinJoin::CalculateAmountPriority(collateral_amount);
+}
+
+// Fuzz CCoinJoinQueue — deserialization + method exercising
+FUZZ_TARGET(coinjoin_queue)
+{
+    FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
+    CCoinJoinQueue queue;
+    {
+        CDataStream ds(fuzzed_data_provider.ConsumeRemainingBytes<unsigned char>(), SER_NETWORK, PROTOCOL_VERSION);
+        try {
+            ds >> queue;
+        } catch (const std::ios_base::failure&) {
+            return;
+        }
+    }
+
+    // Exercise methods on successfully deserialized queue — must not crash
+    (void)queue.GetHash();
+    (void)queue.GetSignatureHash();
+    (void)queue.ToString();
+
+    // Test time bounds with various times
+    (void)queue.IsTimeOutOfBounds();
+    (void)queue.IsTimeOutOfBounds(0);
+    (void)queue.IsTimeOutOfBounds(queue.nTime);
+    // Use saturating arithmetic to avoid signed overflow UB in the test driver
+    const int64_t kTimeout = COINJOIN_QUEUE_TIMEOUT;
+    const int64_t time_plus = (queue.nTime <= std::numeric_limits<int64_t>::max() - kTimeout)
+                                  ? queue.nTime + kTimeout
+                                  : std::numeric_limits<int64_t>::max();
+    const int64_t time_minus = (queue.nTime >= std::numeric_limits<int64_t>::min() + kTimeout)
+                                  ? queue.nTime - kTimeout
+                                  : std::numeric_limits<int64_t>::min();
+    (void)queue.IsTimeOutOfBounds(time_plus);
+    (void)queue.IsTimeOutOfBounds(time_minus);
+    (void)queue.IsTimeOutOfBounds(std::numeric_limits<int64_t>::max());
+    (void)queue.IsTimeOutOfBounds(std::numeric_limits<int64_t>::min());
+}
+
+// Fuzz CCoinJoinBroadcastTx — deserialization + IsValidStructure
+FUZZ_TARGET(coinjoin_broadcasttx, .init = initialize_coinjoin)
+{
+    FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
+    CCoinJoinBroadcastTx dstx;
+    {
+        CDataStream ds(fuzzed_data_provider.ConsumeRemainingBytes<unsigned char>(), SER_NETWORK, PROTOCOL_VERSION);
+        try {
+            ds >> dstx;
+        } catch (const std::ios_base::failure&) {
+            return;
+        }
+    }
+
+    // Exercise methods — must not crash
+    (void)dstx.IsValidStructure();
+    (void)dstx.GetSignatureHash();
+    (void)static_cast<bool>(dstx);
+}
+
+// Fuzz CCoinJoinEntry::AddScriptSig with fuzzed inputs
+FUZZ_TARGET(coinjoin_entry_addscriptsig)
+{
+    FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
+    // Build a CCoinJoinEntry with fuzzed DSIn entries
+    CCoinJoinEntry entry;
+    const size_t num_inputs = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 20);
+    for (size_t i = 0; i < num_inputs; ++i) {
+        CTxDSIn dsin;
+        // Fuzz the outpoint
+        uint256 hash;
+        auto hash_bytes = fuzzed_data_provider.ConsumeBytes<uint8_t>(32);
+        if (hash_bytes.size() == 32) {
+            memcpy(hash.begin(), hash_bytes.data(), 32);
+        }
+        dsin.prevout = COutPoint(hash, fuzzed_data_provider.ConsumeIntegral<uint32_t>());
+        dsin.nSequence = fuzzed_data_provider.ConsumeIntegral<uint32_t>();
+        dsin.fHasSig = fuzzed_data_provider.ConsumeBool();
+        entry.vecTxDSIn.push_back(dsin);
+    }
+
+    // Now try to AddScriptSig with fuzzed CTxIn
+    LIMITED_WHILE(fuzzed_data_provider.remaining_bytes() > 0, 50)
+    {
+        CTxIn txin;
+        uint256 hash;
+        auto hash_bytes = fuzzed_data_provider.ConsumeBytes<uint8_t>(32);
+        if (hash_bytes.size() == 32) {
+            memcpy(hash.begin(), hash_bytes.data(), 32);
+        }
+        txin.prevout = COutPoint(hash, fuzzed_data_provider.ConsumeIntegral<uint32_t>());
+        txin.nSequence = fuzzed_data_provider.ConsumeIntegral<uint32_t>();
+        auto script_bytes = fuzzed_data_provider.ConsumeBytes<unsigned char>(
+            fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 100));
+        txin.scriptSig = CScript(script_bytes.begin(), script_bytes.end());
+
+        (void)entry.AddScriptSig(txin);
+    }
+}
+
+// Fuzz CCoinJoinStatusUpdate — deserialization + GetMessageByID
+FUZZ_TARGET(coinjoin_status_update)
+{
+    FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
+    // Test with arbitrary PoolMessage values first (doesn't consume much data)
+    const auto raw_msg = fuzzed_data_provider.ConsumeIntegral<int>();
+    (void)CoinJoin::GetMessageByID(static_cast<PoolMessage>(raw_msg));
+
+    CCoinJoinStatusUpdate status;
+    {
+        CDataStream ds(fuzzed_data_provider.ConsumeRemainingBytes<unsigned char>(), SER_NETWORK, PROTOCOL_VERSION);
+        try {
+            ds >> status;
+        } catch (const std::ios_base::failure&) {
+            return;
+        }
+    }
+
+    // Exercise GetMessageByID with deserialized message ID — must not crash
+    (void)CoinJoin::GetMessageByID(status.nMessageID);
+}

From 3497a3c80d6fcd5493c0a1fe6e97ac61e05b460c Mon Sep 17 00:00:00 2001
From: PastaClaw
Date: Fri, 27 Feb 2026 12:31:27 -0600
Subject: [PATCH 5/9] test(fuzz): add governance proposal validation fuzz target

Fuzz target for Dash governance proposal validation:
- CGovernanceObject construction from fuzz-driven parameters
- Proposal name, URL, payment address, amount validation
- 
Start/end epoch and superblock cycle boundary checks - Tests both valid and malformed governance object fields --- .../fuzz/governance_proposal_validator.cpp | 123 ++++++++++++++++++ 1 file changed, 123 insertions(+) create mode 100644 src/test/fuzz/governance_proposal_validator.cpp diff --git a/src/test/fuzz/governance_proposal_validator.cpp b/src/test/fuzz/governance_proposal_validator.cpp new file mode 100644 index 000000000000..a21113ddd556 --- /dev/null +++ b/src/test/fuzz/governance_proposal_validator.cpp @@ -0,0 +1,123 @@ +// Copyright (c) 2026 The Dash Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace { +std::string HexEncodeString(const std::string& input) +{ + static constexpr char HEX_DIGITS[] = "0123456789abcdef"; + std::string out; + out.reserve(input.size() * 2); + for (const unsigned char ch : input) { + out.push_back(HEX_DIGITS[ch >> 4]); + out.push_back(HEX_DIGITS[ch & 0x0f]); + } + return out; +} + +std::string SanitizeJsonString(std::string input) +{ + for (char& ch : input) { + if (ch == '"' || ch == '\\' || static_cast(ch) < 0x20) { + ch = 'x'; + } + } + return input; +} + +std::string MakeProposalJson(int64_t type, const std::string& name, int64_t start_epoch, int64_t end_epoch, + double payment_amount, const std::string& payment_address, const std::string& url) +{ + return strprintf("{\"type\":%" PRId64 ",\"name\":\"%s\",\"start_epoch\":%" PRId64 ",\"end_epoch\":%" PRId64 + ",\"payment_amount\":%.17g,\"payment_address\":\"%s\",\"url\":\"%s\"}", + type, SanitizeJsonString(name), start_epoch, end_epoch, payment_amount, + SanitizeJsonString(payment_address), SanitizeJsonString(url)); +} + +void RunValidatorCase(const std::string& hex_data, bool allow_script, bool check_expiration) +{ + try { + CProposalValidator 
validator(hex_data, allow_script);
+        (void)validator.Validate(check_expiration);
+        (void)validator.GetErrorMessages();
+    } catch (const std::exception&) {
+    } catch (...) {
+    }
+}
+} // namespace
+
+void initialize_governance_proposal_validator() { SelectParams(CBaseChainParams::MAIN); }
+
+FUZZ_TARGET(governance_proposal_validator, .init = initialize_governance_proposal_validator)
+{
+    FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
+    constexpr std::array kPaymentAddresses{
+        "Xs7iEDx8nMwJHdiQnwvCnLBTP2sjmDGTJA", // P2PKH (mainnet)
+        "7XuP9xVGyvkCAfW84QJkGfbiR7dX9TYaPH", // P2SH (mainnet)
+    };
+
+    const int64_t type = fuzzed_data_provider.ConsumeBool() ? std23::to_underlying(GovernanceObject::PROPOSAL)
+                                                            : fuzzed_data_provider.ConsumeIntegral<int64_t>();
+
+    const int64_t start_epoch = fuzzed_data_provider.ConsumeIntegral<int64_t>();
+    const int64_t end_epoch = start_epoch + fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(-4, 1024);
+
+    double payment_amount = fuzzed_data_provider.ConsumeFloatingPointInRange<double>(-1000.0, 1000.0);
+    if (fuzzed_data_provider.ConsumeBool()) {
+        payment_amount = 1.0;
+    }
+
+    std::string random_name = fuzzed_data_provider.ConsumeRandomLengthString(96);
+    std::string random_url = fuzzed_data_provider.ConsumeRandomLengthString(256);
+
+    if (fuzzed_data_provider.ConsumeBool()) {
+        random_name = "dash-proposal-" + random_name;
+    }
+
+    constexpr std::array kUrls{
+        "https://dash.org/proposals/1",
+        "http://[::1]/path",
+        "http://[broken/path",
+        "http://broken]/path",
+    };
+
+    const std::string payment_address =
+        fuzzed_data_provider.ConsumeBool()
+            ? std::string(
+                  kPaymentAddresses[fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, kPaymentAddresses.size() - 1)])
+            : fuzzed_data_provider.ConsumeRandomLengthString(96);
+    const std::string url = fuzzed_data_provider.ConsumeBool()
+                                ? std::string(
+                                      kUrls[fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, kUrls.size() - 1)])
+                                : random_url;
+
+    const std::string json_hex = HexEncodeString(
+        MakeProposalJson(type, random_name, start_epoch, end_epoch, payment_amount, payment_address, url));
+
+    const std::string malformed_json_hex = HexEncodeString("{" + fuzzed_data_provider.ConsumeRandomLengthString(128));
+    const size_t oversized_payload_size = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(513, 2048);
+    const std::string oversized_hex(oversized_payload_size * 2, 'a');
+    const std::string random_hex = fuzzed_data_provider.ConsumeRandomLengthString(2048);
+
+    for (const bool allow_script : {false, true}) {
+        for (const bool check_expiration : {false, true}) {
+            RunValidatorCase(json_hex, allow_script, check_expiration);
+            RunValidatorCase(malformed_json_hex, allow_script, check_expiration);
+            RunValidatorCase(oversized_hex, allow_script, check_expiration);
+            RunValidatorCase(random_hex, allow_script, check_expiration);
+        }
+    }
+}

From 9531e45d4d852a597653a28cd6b5ce32bf992696 Mon Sep 17 00:00:00 2001
From: PastaClaw
Date: Fri, 27 Feb 2026 12:31:34 -0600
Subject: [PATCH 6/9] test(fuzz): add special transaction and asset lock/unlock fuzz targets

special_tx_validation.cpp:
- Fuzz target for CheckSpecialTx() validation pipeline
- Tests ProRegTx, ProUpServTx, ProUpRegTx, ProUpRevTx, CbTx, QcTx, MnHfTx, AssetLockTx, AssetUnlockTx
- Exercises payload extraction, version checks, field validation

asset_lock_unlock.cpp:
- Dedicated fuzz targets for Platform asset lock/unlock transactions
- CAssetLockPayload and CAssetUnlockPayload deserialization
- Credit pool index interaction and withdrawal validation
- Tests both raw deserialization and full TX context paths
---
 src/test/fuzz/asset_lock_unlock.cpp     | 195 ++++++++++++++++++++++++
 src/test/fuzz/special_tx_validation.cpp | 109 +++++++++++++
 2 files changed, 304 insertions(+)
 create mode 100644 src/test/fuzz/asset_lock_unlock.cpp
 create mode 100644 
src/test/fuzz/special_tx_validation.cpp diff --git a/src/test/fuzz/asset_lock_unlock.cpp b/src/test/fuzz/asset_lock_unlock.cpp new file mode 100644 index 000000000000..32e5e00bd625 --- /dev/null +++ b/src/test/fuzz/asset_lock_unlock.cpp @@ -0,0 +1,195 @@ +// Copyright (c) 2026 The Dash Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include +#include +#include +#include +#include