diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 000000000..b34800a7d --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,72 @@ +codecov: + require_ci_to_pass: false + +ignore: + - "**/test_utils.rs" + - "**/test_utils/**" + - "**/tests.rs" + - "**/tests/**" + +coverage: + status: + project: + default: + target: auto + threshold: 2% + +component_management: + individual_components: + - component_id: dash-core + name: Dash Core + paths: + - dash/src/** + + - component_id: hashes + name: Hashes + paths: + - hashes/src/** + + - component_id: dash-spv + name: Dash SPV + paths: + - dash-spv/src/** + + - component_id: dash-spv-ffi + name: Dash SPV FFI + paths: + - dash-spv-ffi/src/** + + - component_id: key-wallet + name: Key Wallet + paths: + - key-wallet/src/** + + - component_id: key-wallet-ffi + name: Key Wallet FFI + paths: + - key-wallet-ffi/src/** + + - component_id: key-wallet-manager + name: Key Wallet Manager + paths: + - key-wallet-manager/src/** + + - component_id: dash-network + name: Dash Network + paths: + - dash-network/src/** + + - component_id: dash-network-ffi + name: Dash Network FFI + paths: + - dash-network-ffi/src/** + + - component_id: rpc-client + name: RPC Client + paths: + - rpc-client/src/** + + - component_id: rpc-json + name: RPC JSON + paths: + - rpc-json/src/** diff --git a/.github/scripts/coverage-diff.py b/.github/scripts/coverage-diff.py new file mode 100755 index 000000000..8b8d7c703 --- /dev/null +++ b/.github/scripts/coverage-diff.py @@ -0,0 +1,442 @@ +#!/usr/bin/env python3 +"""Coverage diff analyzer for PRs. + +Compares baseline LCOV coverage against PR coverage to detect whether +new #[test] functions actually cover previously-uncovered production lines. +Posts results as a markdown report. +""" + +import argparse +import re +import subprocess + + +def parse_lcov(filepath, *, allow_missing=False): + """Parse an LCOV file into {file: {line: hit_count}}. 
+ + Normalizes paths to be repo-relative by stripping common CI workspace + prefixes. + """ + coverage = {} + current_file = None + + try: + with open(filepath) as f: + for line in f: + line = line.strip() + if line.startswith("SF:"): + raw_path = line[3:] + current_file = normalize_path(raw_path) + elif line.startswith("DA:") and current_file is not None: + parts = line[3:].split(",") + if len(parts) >= 2: + line_no = int(parts[0]) + hit_count = int(parts[1]) + if current_file not in coverage: + coverage[current_file] = {} + coverage[current_file][line_no] = hit_count + elif line == "end_of_record": + current_file = None + except FileNotFoundError: + if allow_missing: + return {} + raise + + return coverage + + +def normalize_path(path): + """Strip CI workspace prefix to get repo-relative path.""" + # Common CI patterns: /home/runner/work/repo/repo/src/... + # or /github/workspace/src/... + markers = ["/home/runner/work/", "/github/workspace/"] + for marker in markers: + idx = path.find(marker) + if idx != -1: + remainder = path[idx + len(marker) :] + # /home/runner/work/repo/repo/src -> strip two dirs + if marker == "/home/runner/work/": + parts = remainder.split("/", 2) + if len(parts) > 2: + return parts[2] + return remainder + + # If path is absolute but not a known CI prefix, try to make it relative + # to the repo root by finding common crate directories + for crate_dir in [ + "/dash/", + "/hashes/", + "/internals/", + "/dash-spv/", + "/dash-spv-ffi/", + "/dash-network/", + "/dash-network-ffi/", + "/key-wallet/", + "/key-wallet-ffi/", + "/key-wallet-manager/", + "/rpc-client/", + "/rpc-json/", + "/rpc-integration-test/", + "/test-utils/", + ]: + idx = path.find(crate_dir) + if idx != -1: + return path[idx + 1 :] # strip leading / + + return path + + +def is_production_file(filepath): + """Return True if the file is production code (not test code).""" + normalized = filepath.replace("\\", "/") + parts = normalized.split("/") + basename = parts[-1] + + # 
Exclude test files + if any(p == "tests" for p in parts): + return False + if basename in {"tests.rs", "test_utils.rs"}: + return False + if normalized.endswith("_test.rs") or normalized.endswith("_tests.rs"): + return False + if any(p in {"test-utils", "test_utils"} for p in parts): + return False + # Exclude benchmark files + if any(p == "benches" for p in parts): + return False + + return normalized.endswith(".rs") + + +def detect_new_tests(base_ref): + """Detect new #[test] functions added in this PR. + + Returns list of (file, function_name) tuples. + """ + try: + result = subprocess.run( + [ + "git", + "diff", + f"origin/{base_ref}...HEAD", + "--unified=0", + "--", + "*.rs", + ], + capture_output=True, + text=True, + check=True, + ) + except (subprocess.CalledProcessError, FileNotFoundError): + return [] + + new_tests = [] + current_file = None + saw_test_attr = False + + for line in result.stdout.splitlines(): + # Track current file + if line.startswith("diff --git"): + match = re.search(r"b/(.+)$", line) + if match: + current_file = match.group(1) + saw_test_attr = False + continue + + # Only look at added lines + if not line.startswith("+") or line.startswith("+++"): + if line.startswith("-") or line.startswith("@@"): + continue + # Context lines or other non-added lines reset test attr tracking + # only if they contain actual code + continue + + added = line[1:].strip() + + if "#[test]" in added or "#[tokio::test]" in added: + saw_test_attr = True + continue + + if saw_test_attr: + fn_match = re.match( + r"(?:pub(?:\([^)]*\))?\s+)?(?:async\s+)?fn\s+(\w+)", + added, + ) + if fn_match and current_file: + new_tests.append((current_file, fn_match.group(1))) + saw_test_attr = False + continue + + # Reset if we see a non-empty, non-attribute line between #[test] and fn + if saw_test_attr and added and not added.startswith("#[") and not added.startswith("//"): + saw_test_attr = False + + return new_tests + + +def compute_coverage_diff(baseline, pr): + """Compute 
coverage difference between baseline and PR. + + Returns: + baseline_stats: (covered, total) for production files + pr_stats: (covered, total) for production files + newly_covered: {file: [line_numbers]} - lines covered in PR but not baseline + newly_uncovered: {file: [line_numbers]} - lines covered in baseline but not PR + """ + # Collect all production files from both + all_files = set() + for f in baseline: + if is_production_file(f): + all_files.add(f) + for f in pr: + if is_production_file(f): + all_files.add(f) + + baseline_covered = 0 + baseline_total = 0 + pr_covered = 0 + pr_total = 0 + newly_covered = {} + newly_uncovered = {} + + for f in sorted(all_files): + base_lines = baseline.get(f, {}) + pr_lines = pr.get(f, {}) + all_lines = set(base_lines.keys()) | set(pr_lines.keys()) + + for line_no in all_lines: + base_hit = base_lines.get(line_no, 0) + pr_hit = pr_lines.get(line_no, 0) + + if line_no in base_lines: + baseline_total += 1 + if base_hit > 0: + baseline_covered += 1 + + if line_no in pr_lines: + pr_total += 1 + if pr_hit > 0: + pr_covered += 1 + + # Newly covered: not covered in baseline (or absent), covered in PR + if pr_hit > 0 and base_hit == 0: + if f not in newly_covered: + newly_covered[f] = [] + newly_covered[f].append(line_no) + + # Newly uncovered: covered in baseline, not covered in PR + if base_hit > 0 and pr_hit == 0 and line_no in pr_lines: + if f not in newly_uncovered: + newly_uncovered[f] = [] + newly_uncovered[f].append(line_no) + + baseline_stats = (baseline_covered, baseline_total) + pr_stats = (pr_covered, pr_total) + + return baseline_stats, pr_stats, newly_covered, newly_uncovered + + +def format_pct(covered, total): + """Format coverage percentage.""" + if total == 0: + return "N/A" + return f"{covered / total * 100:.2f}%" + + +def generate_report( + new_tests, baseline_stats, pr_stats, newly_covered, newly_uncovered, baseline_available +): + """Generate markdown report.""" + lines = [] + lines.append("## Coverage Diff 
Report") + lines.append("") + + if not baseline_available: + lines.append( + "> **Note:** No baseline coverage data available for comparison. " + "This is expected on the first run or after cache expiry. " + "Showing PR coverage summary only." + ) + lines.append("") + pr_covered, pr_total = pr_stats + lines.append(f"**PR coverage:** {pr_covered}/{pr_total} lines ({format_pct(pr_covered, pr_total)})") + lines.append("") + if new_tests: + lines.append(f"**New test functions detected:** {len(new_tests)}") + lines.append("") + _append_test_list(lines, new_tests) + return "\n".join(lines) + + # Coverage summary + base_covered, base_total = baseline_stats + pr_covered, pr_total = pr_stats + + base_pct = base_covered / base_total * 100 if base_total > 0 else 0 + pr_pct = pr_covered / pr_total * 100 if pr_total > 0 else 0 + delta_pct = pr_pct - base_pct + + delta_sign = "+" if delta_pct >= 0 else "" + delta_str = f"{delta_sign}{delta_pct:.2f}%" + + lines.append("| Metric | Baseline | PR | Delta |") + lines.append("|--------|----------|-----|-------|") + lines.append( + f"| Production lines covered | {base_covered}/{base_total} " + f"({format_pct(base_covered, base_total)}) | {pr_covered}/{pr_total} " + f"({format_pct(pr_covered, pr_total)}) | {delta_str} |" + ) + lines.append("") + + total_newly_covered = sum(len(v) for v in newly_covered.values()) + total_newly_uncovered = sum(len(v) for v in newly_uncovered.values()) + + lines.append(f"**Newly covered production lines:** {total_newly_covered}") + if total_newly_uncovered > 0: + lines.append(f"**Newly uncovered production lines:** {total_newly_uncovered}") + lines.append("") + + # New tests analysis + if new_tests: + lines.append(f"### New test functions ({len(new_tests)})") + lines.append("") + _append_test_list(lines, new_tests) + lines.append("") + + if total_newly_covered == 0: + lines.append( + "> :warning: **Warning:** This PR adds new test functions but does not " + "cover any previously-uncovered production lines. 
Consider whether these " + "tests are exercising meaningful new code paths." + ) + else: + lines.append( + f"> :white_check_mark: This PR adds new tests that cover " + f"**{total_newly_covered}** previously-uncovered production line(s)." + ) + lines.append("") + + # Per-file breakdown + if newly_covered: + lines.append("
<details>")
+        lines.append("<summary>Newly covered lines by file</summary>")
+        lines.append("")
+        for f in sorted(newly_covered.keys()):
+            file_lines = newly_covered[f]
+            ranges = _compress_line_ranges(file_lines)
+            lines.append(f"- `{f}`: {ranges} ({len(file_lines)} lines)")
+        lines.append("")
+        lines.append("
</details>")
+        lines.append("")
+
+    if newly_uncovered:
+        lines.append("
<details>")
+        lines.append("<summary>Newly uncovered lines by file</summary>")
+        lines.append("")
+        for f in sorted(newly_uncovered.keys()):
+            file_lines = newly_uncovered[f]
+            ranges = _compress_line_ranges(file_lines)
+            lines.append(f"- `{f}`: {ranges} ({len(file_lines)} lines)")
+        lines.append("")
+        lines.append("
</details>")
+
+    return "\n".join(lines)
+
+
+def _append_test_list(lines, new_tests):
+    """Append test function list, collapsing if >20."""
+    if len(new_tests) > 20:
+        lines.append("
<details>")
+        lines.append(f"<summary>{len(new_tests)} new test functions (click to expand)</summary>")
+        lines.append("")
+
+    for filepath, fn_name in new_tests:
+        lines.append(f"- `{filepath}`: `{fn_name}`")
+
+    if len(new_tests) > 20:
+        lines.append("")
+        lines.append("
</details>")
+
+
+def _compress_line_ranges(line_numbers):
+    """Compress [1,2,3,5,7,8,9] into '1-3, 5, 7-9'."""
+    if not line_numbers:
+        return ""
+
+    sorted_lines = sorted(line_numbers)
+    ranges = []
+    start = sorted_lines[0]
+    end = start
+
+    for n in sorted_lines[1:]:
+        if n == end + 1:
+            end = n
+        else:
+            ranges.append(f"{start}-{end}" if start != end else str(start))
+            start = n
+            end = n
+
+    ranges.append(f"{start}-{end}" if start != end else str(start))
+    return ", ".join(ranges)
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Coverage diff analyzer")
+    parser.add_argument("--baseline", required=True, help="Path to baseline LCOV file")
+    parser.add_argument("--pr", required=True, help="Path to PR LCOV file")
+    parser.add_argument("--base-ref", required=True, help="Base branch name")
+    parser.add_argument(
+        "--baseline-available",
+        default="false",
+        help="Whether baseline cache was found (true/false)",
+    )
+    parser.add_argument("--output", required=True, help="Output markdown file path")
+
+    args = parser.parse_args()
+
+    baseline_available = args.baseline_available.lower() == "true"
+
+    # Parse coverage data
+    pr_coverage = parse_lcov(args.pr, allow_missing=False)
+
+    if baseline_available:
+        baseline_coverage = parse_lcov(args.baseline, allow_missing=True)
+    else:
+        baseline_coverage = {}
+
+    # Detect new tests
+    new_tests = detect_new_tests(args.base_ref)
+
+    # Compute diff
+    if baseline_available:
+        baseline_stats, pr_stats, newly_covered, newly_uncovered = compute_coverage_diff(
+            baseline_coverage, pr_coverage
+        )
+    else:
+        pr_prod_covered = 0
+        pr_prod_total = 0
+        for f, file_lines in pr_coverage.items():
+            if is_production_file(f):
+                for line_no, count in file_lines.items():
+                    pr_prod_total += 1
+                    if count > 0:
+                        pr_prod_covered += 1
+        pr_stats = (pr_prod_covered, pr_prod_total)
+        baseline_stats = (0, 0)
+        newly_covered = {}
+        newly_uncovered = {}
+
+    # Generate report
+    report = generate_report(
+        new_tests, baseline_stats, pr_stats, 
newly_covered, newly_uncovered, baseline_available + ) + + # Write output + with open(args.output, "w") as f: + f.write(report) + + print(report) + print(f"\nReport written to {args.output}") + + +if __name__ == "__main__": + main() diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml new file mode 100644 index 000000000..abfa145f5 --- /dev/null +++ b/.github/workflows/coverage.yml @@ -0,0 +1,144 @@ +name: Coverage + +on: + workflow_dispatch: + push: + branches: + - master + - 'v[0-9]*.[0-9]*-dev' + pull_request: + branches: + - master + - 'v[0-9]*.[0-9]*-dev' + +permissions: + contents: read + pull-requests: write + +concurrency: + group: coverage-${{ github.ref }} + cancel-in-progress: ${{ github.event_name != 'push' }} + +jobs: + detect-changes: + name: Detect Changes + runs-on: ubuntu-latest + outputs: + any-code: ${{ steps.filter.outputs.any-code }} + steps: + - uses: actions/checkout@v4 + - uses: dorny/paths-filter@v3 + id: filter + with: + filters: | + any-code: + - '**/*.rs' + - '**/Cargo.toml' + - 'Cargo.lock' + - '.codecov.yml' + - '.github/workflows/coverage.yml' + - '.github/scripts/coverage-diff.py' + + coverage: + name: Code Coverage + needs: detect-changes + if: needs.detect-changes.outputs.any-code == 'true' + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup Rust + uses: dtolnay/rust-toolchain@stable + with: + components: llvm-tools + + - name: Enable Rust cache + uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: "false" + + - name: Install cargo-llvm-cov + uses: taiki-e/install-action@cargo-llvm-cov + + - name: Generate coverage + run: cargo llvm-cov --workspace --lcov --output-path lcov.info + + - name: Upload to Codecov + uses: codecov/codecov-action@v5 + with: + files: lcov.info + token: ${{ secrets.CODECOV_TOKEN }} + fail_ci_if_error: false + override_branch: ${{ github.ref_name }} + + # Cache baseline coverage for PR comparison + - name: Cache baseline 
coverage
+        if: github.event_name == 'push'
+        uses: actions/cache/save@v4
+        with:
+          path: lcov.info
+          key: coverage-lcov-${{ github.ref_name }}-${{ github.sha }}
+
+      # PR coverage diff analysis
+      - name: Preserve PR coverage
+        if: github.event_name == 'pull_request'
+        run: cp lcov.info pr-lcov.info
+
+      - name: Restore baseline coverage
+        if: github.event_name == 'pull_request'
+        id: baseline
+        uses: actions/cache/restore@v4
+        with:
+          path: lcov.info
+          key: coverage-lcov-${{ github.base_ref }}-notexact
+          restore-keys: |
+            coverage-lcov-${{ github.base_ref }}-
+
+      - name: Setup Python
+        if: github.event_name == 'pull_request'
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.12'
+
+      - name: Run coverage diff analysis
+        if: github.event_name == 'pull_request'
+        env:
+          BASELINE_CACHE_HIT: ${{ steps.baseline.outputs.cache-matched-key != '' }}
+          BASE_REF: ${{ github.base_ref }}
+        run: |
+          python .github/scripts/coverage-diff.py \
+            --baseline lcov.info \
+            --pr pr-lcov.info \
+            --base-ref "$BASE_REF" \
+            --baseline-available "$BASELINE_CACHE_HIT" \
+            --output coverage-report.md
+
+      - name: Post PR comment
+        if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          PR_NUMBER: ${{ github.event.pull_request.number }}
+        run: |
+          MARKER="<!-- coverage-diff-report -->"
+          BODY="$(cat coverage-report.md)"
+
+          # Check for existing comment
+          EXISTING=$(gh api \
+            "repos/${{ github.repository }}/issues/${PR_NUMBER}/comments" \
+            --jq ".[] | select(.body | contains(\"${MARKER}\")) | .id" \
+            | head -1)
+
+          if [ -n "$EXISTING" ]; then
+            gh api \
+              "repos/${{ github.repository }}/issues/comments/${EXISTING}" \
+              -X PATCH \
+              -f body="${MARKER}
+          ${BODY}"
+          else
+            gh api \
+              "repos/${{ github.repository }}/issues/${PR_NUMBER}/comments" \
+              -f body="${MARKER}
+          ${BODY}"
+          fi
diff --git a/README.md b/README.md
index f6a19356f..1f4c0b00e 100644
--- a/README.md
+++ b/README.md
@@ -17,6 +17,29 @@

+| Branch | Tests | Coverage | +|--------|-------|----------| +| v0.42-dev | [![Tests](https://github.com/dashpay/rust-dashcore/actions/workflows/rust.yml/badge.svg?branch=v0.42-dev)](https://github.com/dashpay/rust-dashcore/actions) | [![codecov](https://codecov.io/gh/dashpay/rust-dashcore/branch/v0.42-dev/graph/badge.svg?token=MG11Z9I7F0)](https://codecov.io/gh/dashpay/rust-dashcore) | + +
+<details><summary>Per-Crate Coverage</summary>
+
+| Crate | Coverage |
+|-------|----------|
+| dash | [![codecov](https://codecov.io/gh/dashpay/rust-dashcore/branch/v0.42-dev/graph/badge.svg?token=MG11Z9I7F0&component=dash-core)](https://codecov.io/gh/dashpay/rust-dashcore/component/dash-core) |
+| hashes | [![codecov](https://codecov.io/gh/dashpay/rust-dashcore/branch/v0.42-dev/graph/badge.svg?token=MG11Z9I7F0&component=hashes)](https://codecov.io/gh/dashpay/rust-dashcore/component/hashes) |
+| dash-spv | [![codecov](https://codecov.io/gh/dashpay/rust-dashcore/branch/v0.42-dev/graph/badge.svg?token=MG11Z9I7F0&component=dash-spv)](https://codecov.io/gh/dashpay/rust-dashcore/component/dash-spv) |
+| dash-spv-ffi | [![codecov](https://codecov.io/gh/dashpay/rust-dashcore/branch/v0.42-dev/graph/badge.svg?token=MG11Z9I7F0&component=dash-spv-ffi)](https://codecov.io/gh/dashpay/rust-dashcore/component/dash-spv-ffi) |
+| key-wallet | [![codecov](https://codecov.io/gh/dashpay/rust-dashcore/branch/v0.42-dev/graph/badge.svg?token=MG11Z9I7F0&component=key-wallet)](https://codecov.io/gh/dashpay/rust-dashcore/component/key-wallet) |
+| key-wallet-ffi | [![codecov](https://codecov.io/gh/dashpay/rust-dashcore/branch/v0.42-dev/graph/badge.svg?token=MG11Z9I7F0&component=key-wallet-ffi)](https://codecov.io/gh/dashpay/rust-dashcore/component/key-wallet-ffi) |
+| key-wallet-manager | [![codecov](https://codecov.io/gh/dashpay/rust-dashcore/branch/v0.42-dev/graph/badge.svg?token=MG11Z9I7F0&component=key-wallet-manager)](https://codecov.io/gh/dashpay/rust-dashcore/component/key-wallet-manager) |
+| dash-network | [![codecov](https://codecov.io/gh/dashpay/rust-dashcore/branch/v0.42-dev/graph/badge.svg?token=MG11Z9I7F0&component=dash-network)](https://codecov.io/gh/dashpay/rust-dashcore/component/dash-network) |
+| dash-network-ffi | 
[![codecov](https://codecov.io/gh/dashpay/rust-dashcore/branch/v0.42-dev/graph/badge.svg?token=MG11Z9I7F0&component=dash-network-ffi)](https://codecov.io/gh/dashpay/rust-dashcore/component/dash-network-ffi) | +| rpc-client | [![codecov](https://codecov.io/gh/dashpay/rust-dashcore/branch/v0.42-dev/graph/badge.svg?token=MG11Z9I7F0&component=rpc-client)](https://codecov.io/gh/dashpay/rust-dashcore/component/rpc-client) | +| rpc-json | [![codecov](https://codecov.io/gh/dashpay/rust-dashcore/branch/v0.42-dev/graph/badge.svg?token=MG11Z9I7F0&component=rpc-json)](https://codecov.io/gh/dashpay/rust-dashcore/component/rpc-json) | + +
+</details>
 For contributors: see CONTRIBUTING.md and AGENTS.md for branch policy and commands. [Documentation](https://dashcore.readme.io/docs)