diff --git a/.claude/settings.local.json b/.claude/settings.local.json
deleted file mode 100644
index f52b501..0000000
--- a/.claude/settings.local.json
+++ /dev/null
@@ -1,27 +0,0 @@
-{
- "permissions": {
- "allow": [
- "Bash(python:*)",
- "Bash(pytest:*)",
- "Bash(pyright:*)",
- "Bash(.venv/bin/python:*)",
- "Bash(.venv/bin/pytest:*)",
- "Bash(.venv/bin/pytest tests/*)",
- "Bash(.venv/bin/pyright:*)",
- "Bash(uv run python:*)",
- "Bash(uv run pytest:*)",
- "Bash(uv run pyright:*)",
- "Bash(env BUBUS_LOG_LEVEL=DEBUG timeout 20 .venv/bin/python:*)",
- "Bash(env BUBUS_LOG_LEVEL=DEBUG timeout 20 .venv/bin/pytest:*)",
- "Bash(env BUBUS_LOG_LEVEL=DEBUG timeout 20 .venv/bin/pytest tests/*)",
- "Bash(do)",
- "Bash(done)",
- "Bash(for)",
- "Bash(echo:*)",
- "Bash(grep:*)",
- "Bash(rg:*)",
- "Bash(.venv/bin/pytest tests/test_typed_event_results.py::test_builtin_type_casting -v -s --timeout=10)"
- ],
- "deny": []
- }
-}
diff --git a/.cursor/launch.json b/.cursor/launch.json
deleted file mode 100644
index fec9446..0000000
--- a/.cursor/launch.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "version": "0.2.0",
- "configurations": [
- {
- "name": "Python Debugger: Current File",
- "type": "debugpy",
- "request": "launch",
- "program": "${file}",
- "justMyCode": false,
- "env": {
- "PYTHONPATH": "${workspaceFolder}"
- },
- "console": "integratedTerminal"
- },
- {
- "name": "pytest: Debug Current File",
- "type": "debugpy",
- "request": "launch",
- "module": "pytest",
- "args": [
- "${file}",
- "-v",
- "--capture=no"
- ],
- "console": "integratedTerminal",
- "justMyCode": false
- }
- ]
-}
diff --git a/.cursor/rules/bubus.mdc b/.cursor/rules/bubus.mdc
deleted file mode 100644
index b6ecb6a..0000000
--- a/.cursor/rules/bubus.mdc
+++ /dev/null
@@ -1,5 +0,0 @@
----
-description:
-globs:
-alwaysApply: true
----
diff --git a/.cursor/settings.json b/.cursor/settings.json
deleted file mode 100644
index 718ae70..0000000
--- a/.cursor/settings.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "python.analysis.typeCheckingMode": "strict",
- "[python]": {
- "editor.defaultFormatter": "charliermarsh.ruff",
- "editor.formatOnSave": true,
- "editor.codeActionsOnSave": {
- "source.fixAll": "explicit",
- "source.organizeImports": "explicit"
- }
- },
- "python.analysis.inlayHints.variableTypes": false
-}
diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml
new file mode 100644
index 0000000..30bcfcb
--- /dev/null
+++ b/.github/workflows/publish-npm.yml
@@ -0,0 +1,52 @@
+name: publish-npm
+
+on:
+ release:
+ types: [published]
+ workflow_dispatch:
+ inputs:
+ tag:
+ description: npm dist-tag to publish under
+ required: false
+ default: latest
+
+permissions:
+ contents: read
+ id-token: write
+
+jobs:
+ publish_to_npm:
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: bubus-ts
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: pnpm/action-setup@v4
+ with:
+ version: 10
+
+ - uses: actions/setup-node@v4
+ with:
+ node-version: 22
+ cache: pnpm
+ cache-dependency-path: bubus-ts/pnpm-lock.yaml
+ registry-url: https://registry.npmjs.org
+
+ - run: pnpm install --frozen-lockfile
+ - run: pnpm run typecheck
+ - run: pnpm test
+ - run: pnpm run build
+
+ - name: Publish release tag
+ if: github.event_name == 'release'
+ run: pnpm publish --access public --no-git-checks
+ env:
+ NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
+
+ - name: Publish manual tag
+ if: github.event_name == 'workflow_dispatch'
+ run: pnpm publish --access public --tag "${{ inputs.tag }}" --no-git-checks
+ env:
+ NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
deleted file mode 100644
index 7a1b98e..0000000
--- a/.github/workflows/test.yaml
+++ /dev/null
@@ -1,144 +0,0 @@
-name: test
-permissions:
- actions: read
- contents: write
- pull-requests: write # Allow writing comments on PRs
- issues: write # Allow writing comments on issues
- statuses: write # Allow writing statuses on PRs
- discussions: write
-
-on:
- push:
- branches:
- - main
- - stable
- - 'releases/**'
- tags:
- - '*'
- pull_request:
- workflow_dispatch:
-
-jobs:
- find_tests:
- runs-on: ubuntu-latest
- outputs:
- TEST_FILENAMES: ${{ steps.lsgrep.outputs.TEST_FILENAMES }}
- # ["test_eventbus", ...]
- steps:
- - uses: actions/checkout@v4
- - id: lsgrep
- run: |
- TEST_FILENAMES="$(ls tests/test_*.py | sed 's|^tests/||' | sed 's|\.py$||' | jq -R -s -c 'split("\n")[:-1]')"
- echo "TEST_FILENAMES=${TEST_FILENAMES}" >> "$GITHUB_OUTPUT"
- echo "$TEST_FILENAMES"
- # https://code.dblock.org/2021/09/03/generating-task-matrix-by-looping-over-repo-files-with-github-actions.html
- - name: Check that at least one test file is found
- run: |
- if [ -z "${{ steps.lsgrep.outputs.TEST_FILENAMES }}" ]; then
- echo "Failed to find any test_*.py files in tests/ folder!" > /dev/stderr
- exit 1
- fi
-
- tests:
- needs: find_tests
- runs-on: ubuntu-latest
- env:
- IN_DOCKER: 'True'
- strategy:
- matrix:
- test_filename: ${{ fromJson(needs.find_tests.outputs.TEST_FILENAMES || '["FAILED_TO_DISCOVER_TESTS"]') }}
- # autodiscovers all the files in tests/test_*.py
- # - test_eventbus
- # ... and more
- name: ${{ matrix.test_filename }}
- steps:
- - name: Check that the previous step managed to find some test files for us to run
- run: |
- if [[ "${{ matrix.test_filename }}" == "FAILED_TO_DISCOVER_TESTS" ]]; then
- echo "Failed get list of test files in tests/test_*.py from find_tests job" > /dev/stderr
- exit 1
- fi
-
- - uses: actions/checkout@v4
- - uses: astral-sh/setup-uv@v6
- with:
- enable-cache: true
- activate-environment: true
-
- - run: uv sync --dev --all-extras
-
- - run: pytest -x tests/${{ matrix.test_filename }}.py --cov=bubus --cov-report=term
-
- - name: Check coverage files
- run: |
- echo "Looking for coverage files..."
- ls -la .coverage* 2>/dev/null || ls -la | grep coverage || echo "No coverage files found"
- if [ -f .coverage ]; then
- echo "Found .coverage file, size: $(stat -f%z .coverage 2>/dev/null || stat -c%s .coverage) bytes"
- fi
-
- - name: Upload coverage data
- uses: actions/upload-artifact@v4
- with:
- name: coverage-${{ matrix.test_filename }}
- path: .coverage
- retention-days: 7
- include-hidden-files: true
- if: always()
-
- coverage:
- needs: tests
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
-
- - uses: astral-sh/setup-uv@v6
- with:
- enable-cache: true
- activate-environment: true
-
- - run: uv sync --dev --all-extras
-
- - name: Download all coverage data
- uses: actions/download-artifact@v4
- with:
- pattern: coverage-*
- path: coverage-data/
-
- - name: Combine coverage data
- run: |
- # Find all .coverage files and copy them with unique names
- counter=1
- for coverage_file in $(find coverage-data -name ".coverage" -type f); do
- cp "$coverage_file" ".coverage.$counter"
- counter=$((counter + 1))
- done
-
- - name: Combine coverage & fail if it's <80%
- run: |
- uv tool install 'coverage[toml]'
-
- coverage combine
- coverage html --skip-covered --skip-empty
-
- # Report and write to summary.
- coverage report --format=markdown >> $GITHUB_STEP_SUMMARY
-
- # Report again and fail if under 80%.
- coverage report --fail-under=80
-
- - name: Upload combined coverage report
- uses: actions/upload-artifact@v4
- with:
- name: coverage-report
- path: |
- htmlcov/
- coverage.xml
- retention-days: 7
-
- - name: Upload coverage to Codecov (optional)
- uses: codecov/codecov-action@v4
- with:
- file: ./coverage.xml
- fail_ci_if_error: false
- continue-on-error: true
diff --git a/.github/workflows/test_py.yaml b/.github/workflows/test_py.yaml
new file mode 100644
index 0000000..165900a
--- /dev/null
+++ b/.github/workflows/test_py.yaml
@@ -0,0 +1,210 @@
+name: test-py
+permissions:
+ actions: read
+ contents: write
+ pull-requests: write # Allow writing comments on PRs
+ issues: write # Allow writing comments on issues
+ statuses: write # Allow writing statuses on PRs
+ discussions: write
+
+on:
+ push:
+ branches:
+ - main
+ - stable
+ - 'releases/**'
+ tags:
+ - '*'
+ pull_request:
+ workflow_dispatch:
+
+jobs:
+ lint_py:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: astral-sh/setup-uv@v6
+ with:
+ enable-cache: true
+ activate-environment: true
+
+ - run: uv sync --dev --all-extras
+ - run: uv run ruff format --check
+ - run: uv run ruff check
+ - run: uv run pyright
+
+ find_py_tests:
+ runs-on: ubuntu-latest
+ outputs:
+ PY_TASKS: ${{ steps.lsgrep.outputs.PY_TASKS }}
+ # [{ "kind": "test" | "example", "name": "test_eventbus" }, ...]
+ PY_TEST_TASKS: ${{ steps.lsgrep.outputs.PY_TEST_TASKS }}
+ # [{ "kind": "test", "name": "test_eventbus" }, ...]
+ steps:
+ - uses: actions/checkout@v4
+ - id: lsgrep
+ run: |
+ PY_TEST_TASKS="$(
+ find tests -maxdepth 1 -type f -name 'test_*.py' \
+ | sort \
+ | sed 's|^tests/||' \
+ | sed 's|\.py$||' \
+ | jq -R -s -c 'split("\n")[:-1] | map({kind: "test", name: .})'
+ )"
+ PY_EXAMPLE_TASKS="$(
+ (
+ if [[ -d examples ]]; then
+ find examples -maxdepth 1 -type f -name '*.py' | sort
+ fi
+ ) \
+ | sed 's|^examples/||' \
+ | sed 's|\.py$||' \
+ | jq -R -s -c 'split("\n")[:-1] | map({kind: "example", name: .})'
+ )"
+ PY_TASKS="$(jq -cn --argjson tests "$PY_TEST_TASKS" --argjson examples "$PY_EXAMPLE_TASKS" '$tests + $examples')"
+
+ echo "PY_TEST_TASKS=${PY_TEST_TASKS}" >> "$GITHUB_OUTPUT"
+ echo "PY_TASKS=${PY_TASKS}" >> "$GITHUB_OUTPUT"
+ echo "$PY_TASKS"
+ # https://code.dblock.org/2021/09/03/generating-task-matrix-by-looping-over-repo-files-with-github-actions.html
+ - name: Check that at least one test file is found
+ run: |
+ if [[ -z "${{ steps.lsgrep.outputs.PY_TEST_TASKS }}" || "${{ steps.lsgrep.outputs.PY_TEST_TASKS }}" == "[]" ]]; then
+ echo "Failed to find any test_*.py files in tests/ folder!" > /dev/stderr
+ exit 1
+ fi
+
+ tests:
+ needs:
+ - lint_py
+ - find_py_tests
+ runs-on: ubuntu-latest
+ env:
+ IN_DOCKER: 'True'
+ strategy:
+ matrix:
+ task: ${{ fromJson(needs.find_py_tests.outputs.PY_TASKS || '[{"kind":"error","name":"FAILED_TO_DISCOVER_TASKS"}]') }}
+ # autodiscovers files in tests/test_*.py and examples/*.py
+ # - { kind: "test", name: "test_eventbus" }
+ # - { kind: "example", name: "quickstart" }
+ # ... and more
+ name: ${{ matrix.task.kind }}-${{ matrix.task.name }}
+ steps:
+ - name: Check that the previous step managed to find some tasks for us to run
+ run: |
+ if [[ "${{ matrix.task.kind }}" == "error" ]]; then
+ echo "Failed get list of tasks in tests/test_*.py and examples/*.py from find_py_tests job" > /dev/stderr
+ exit 1
+ fi
+
+ - uses: actions/checkout@v4
+ - uses: astral-sh/setup-uv@v6
+ with:
+ enable-cache: true
+ activate-environment: true
+
+ - run: uv sync --dev --all-extras
+
+ - name: Run test with coverage
+ if: matrix.task.kind == 'test'
+ run: uv run coverage run --parallel-mode --source=bubus -m pytest -x tests/${{ matrix.task.name }}.py
+
+ - name: Run example
+ if: matrix.task.kind == 'example'
+ run: uv run coverage run --parallel-mode --source=bubus examples/${{ matrix.task.name }}.py
+
+ - name: Check coverage files
+ if: always()
+ run: |
+ echo "Looking for coverage files..."
+ ls -la .coverage* 2>/dev/null || ls -la | grep coverage || echo "No coverage files found"
+ coverage_file="$(find . -maxdepth 1 -type f -name '.coverage*' | head -n 1)"
+ if [ -n "$coverage_file" ]; then
+ echo "Found coverage file ($coverage_file), size: $(stat -f%z "$coverage_file" 2>/dev/null || stat -c%s "$coverage_file") bytes"
+ fi
+
+ - name: Upload coverage data
+ uses: actions/upload-artifact@v4
+ with:
+ name: coverage-${{ matrix.task.kind }}-${{ matrix.task.name }}
+ path: |
+ .coverage*
+ pyproject.toml
+ retention-days: 7
+ include-hidden-files: true
+ if: always()
+
+ coverage:
+ needs: tests
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: astral-sh/setup-uv@v6
+ with:
+ enable-cache: true
+ activate-environment: true
+
+ - run: uv sync --dev --all-extras
+
+ - name: Download all coverage data
+ uses: actions/download-artifact@v4
+ with:
+ pattern: coverage-*
+ path: coverage-data/
+
+ - name: Combine coverage data
+ run: |
+ # Find all .coverage* files and copy them with unique names
+ counter=1
+ for coverage_file in $(find coverage-data -name ".coverage*" -type f); do
+ cp "$coverage_file" ".coverage.$counter"
+ counter=$((counter + 1))
+ done
+
+ - name: Combine coverage & fail if it's <50%
+ run: |
+ uv tool install 'coverage[toml]'
+ OMIT='bubus/bridge*.py'
+
+ coverage combine
+ coverage html --skip-covered --skip-empty --omit="$OMIT"
+ coverage xml --omit="$OMIT"
+
+ echo "### Python combined coverage" >> "$GITHUB_STEP_SUMMARY"
+ echo "" >> "$GITHUB_STEP_SUMMARY"
+ # Report and write a markdown table to summary.
+ coverage report --omit="$OMIT" --format=markdown >> $GITHUB_STEP_SUMMARY
+
+ # Report again and fail if under 50%.
+ coverage report --omit="$OMIT" --fail-under=50
+
+ - name: Upload combined coverage report
+ id: upload_py_coverage_report
+ uses: actions/upload-artifact@v4
+ with:
+ name: coverage-report
+ path: |
+ htmlcov/
+ coverage.xml
+ pyproject.toml
+ retention-days: 7
+
+ - name: Append Python coverage artifact link
+ run: |
+ echo "" >> "$GITHUB_STEP_SUMMARY"
+ echo "[Download Python HTML coverage artifact (coverage-report)](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}/artifacts/${{ steps.upload_py_coverage_report.outputs.artifact-id }})" >> "$GITHUB_STEP_SUMMARY"
+
+ perf:
+ needs: coverage
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: astral-sh/setup-uv@v6
+ with:
+ enable-cache: true
+ activate-environment: true
+
+ - run: uv sync --dev --all-extras
+ - run: uv run perf
diff --git a/.github/workflows/test_ts.yaml b/.github/workflows/test_ts.yaml
new file mode 100644
index 0000000..5a927c3
--- /dev/null
+++ b/.github/workflows/test_ts.yaml
@@ -0,0 +1,279 @@
+name: test-ts
+
+on:
+ push:
+ branches:
+ - main
+ - stable
+ - 'releases/**'
+ tags:
+ - '*'
+ pull_request:
+ workflow_dispatch:
+
+jobs:
+ lint_ts:
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: bubus-ts
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: pnpm/action-setup@v4
+ with:
+ version: 10
+
+ - uses: actions/setup-node@v4
+ with:
+ node-version: 22
+ cache: pnpm
+ cache-dependency-path: bubus-ts/pnpm-lock.yaml
+
+ - run: pnpm install --frozen-lockfile
+ - run: pnpm exec prettier --check .
+ - run: pnpm exec eslint .
+ - run: pnpm run typecheck
+
+ find_ts_tests:
+ runs-on: ubuntu-latest
+ outputs:
+ TS_TASKS: ${{ steps.lsgrep.outputs.TS_TASKS }}
+ # [{ "kind": "test" | "example", "name": "eventbus_basics" }, ...]
+ TS_TEST_TASKS: ${{ steps.lsgrep.outputs.TS_TEST_TASKS }}
+ # [{ "kind": "test", "name": "eventbus_basics" }, ...]
+ steps:
+ - uses: actions/checkout@v4
+ - id: lsgrep
+ run: |
+ TS_TEST_TASKS="$(
+ find bubus-ts/tests -maxdepth 1 -type f -name '*.test.ts' \
+ | sort \
+ | sed 's|^bubus-ts/tests/||' \
+ | sed 's|\.test\.ts$||' \
+ | jq -R -s -c 'split("\n")[:-1] | map({kind: "test", name: .})'
+ )"
+ TS_EXAMPLE_TASKS="$(
+ (
+ if [[ -d bubus-ts/examples ]]; then
+ find bubus-ts/examples -maxdepth 1 -type f -name '*.ts' | sort
+ fi
+ ) \
+ | sed 's|^bubus-ts/examples/||' \
+ | sed 's|\.ts$||' \
+ | jq -R -s -c 'split("\n")[:-1] | map({kind: "example", name: .})'
+ )"
+ TS_TASKS="$(jq -cn --argjson tests "$TS_TEST_TASKS" --argjson examples "$TS_EXAMPLE_TASKS" '$tests + $examples')"
+
+ echo "TS_TEST_TASKS=${TS_TEST_TASKS}" >> "$GITHUB_OUTPUT"
+ echo "TS_TASKS=${TS_TASKS}" >> "$GITHUB_OUTPUT"
+ echo "$TS_TASKS"
+ - name: Check that at least one test file is found
+ run: |
+ if [[ -z "${{ steps.lsgrep.outputs.TS_TEST_TASKS }}" || "${{ steps.lsgrep.outputs.TS_TEST_TASKS }}" == "[]" ]]; then
+ echo "Failed to find any *.test.ts files in bubus-ts/tests/ folder!" > /dev/stderr
+ exit 1
+ fi
+
+ tests:
+ needs:
+ - lint_ts
+ - find_ts_tests
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ task: ${{ fromJson(needs.find_ts_tests.outputs.TS_TASKS || '[{"kind":"error","name":"FAILED_TO_DISCOVER_TASKS"}]') }}
+ # autodiscovers all files in bubus-ts/tests/*.test.ts and bubus-ts/examples/*.ts
+ # - { kind: "test", name: "eventbus_basics" }
+ # - { kind: "example", name: "simple" }
+ # ... and more
+ name: ts-${{ matrix.task.kind }}-${{ matrix.task.name }}
+ defaults:
+ run:
+ working-directory: bubus-ts
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Check that the previous step managed to find some tasks for us to run
+ run: |
+ if [[ "${{ matrix.task.kind }}" == "error" ]]; then
+ echo "Failed get list of tasks from find_ts_tests job" > /dev/stderr
+ exit 1
+ fi
+
+ - uses: pnpm/action-setup@v4
+ with:
+ version: 10
+
+ - uses: actions/setup-node@v4
+ with:
+ node-version: 22
+ cache: pnpm
+ cache-dependency-path: bubus-ts/pnpm-lock.yaml
+
+ - run: pnpm install --frozen-lockfile
+ - name: Prepare coverage directory
+ run: |
+ rm -rf .v8-coverage
+ mkdir -p .v8-coverage
+ - name: Run test with coverage
+ if: matrix.task.kind == 'test'
+ run: NODE_V8_COVERAGE=.v8-coverage NODE_OPTIONS='--expose-gc' node --expose-gc --test --experimental-test-coverage --import tsx tests/${{ matrix.task.name }}.test.ts
+ - name: Run example
+ if: matrix.task.kind == 'example'
+ run: NODE_V8_COVERAGE=.v8-coverage NODE_OPTIONS='--expose-gc' node --expose-gc --import tsx examples/${{ matrix.task.name }}.ts
+ - name: Upload raw coverage data
+ uses: actions/upload-artifact@v4
+ with:
+ name: ts-coverage-${{ matrix.task.kind }}-${{ matrix.task.name }}
+ path: |
+ bubus-ts/.v8-coverage
+ pyproject.toml
+ retention-days: 7
+ include-hidden-files: true
+ if: always()
+
+ coverage:
+ needs: tests
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: pnpm/action-setup@v4
+ with:
+ version: 10
+
+ - uses: actions/setup-node@v4
+ with:
+ node-version: 22
+ cache: pnpm
+ cache-dependency-path: bubus-ts/pnpm-lock.yaml
+
+ - run: cd bubus-ts && pnpm install --frozen-lockfile
+
+ - name: Download all coverage data
+ uses: actions/download-artifact@v4
+ with:
+ pattern: ts-coverage-*
+ path: coverage-data/
+
+ - name: Combine coverage data
+ run: |
+ mkdir -p bubus-ts/.v8-coverage-merged
+
+ counter=1
+ while IFS= read -r -d '' coverage_file; do
+ cp "$coverage_file" "bubus-ts/.v8-coverage-merged/$counter-$(basename "$coverage_file")"
+ counter=$((counter + 1))
+ done < <(find coverage-data -type f -name "*.json" -print0)
+
+ if [[ "$counter" -eq 1 ]]; then
+ echo "No V8 coverage JSON files found in downloaded artifacts" > /dev/stderr
+ exit 1
+ fi
+
+ - name: Build merged coverage report
+ run: |
+ cd bubus-ts
+ set -o pipefail
+ mkdir -p coverage
+ pnpm dlx c8 report \
+ --temp-directory .v8-coverage-merged \
+ --report-dir coverage \
+ --reporter=html \
+ --reporter=text \
+ --reporter=json-summary \
+ --exclude-after-remap \
+ -n 'src/**/*.ts' \
+ -x 'src/bridge*.ts' \
+ -x 'src/optional_deps.ts' | tee coverage/text-report.txt
+
+ node <<'NODE'
+ const fs = require('fs');
+ const summaryPath = 'coverage/coverage-summary.json';
+ const summary = JSON.parse(fs.readFileSync(summaryPath, 'utf8'));
+ const entries = Object.entries(summary);
+ const total = summary.total;
+ const files = entries
+ .filter(([name]) => name !== 'total')
+ .sort((a, b) => String(a[0]).localeCompare(String(b[0])));
+
+ const esc = (s) => String(s).replace(/\|/g, '\\|');
+ const row = (name, m) => {
+ const stmtsTotal = Number(m.statements.total || 0);
+ const stmtsCovered = Number(m.statements.covered || 0);
+ const stmtsMiss = Math.max(stmtsTotal - stmtsCovered, 0);
+ return `| ${esc(name)} | ${stmtsTotal} | ${stmtsMiss} | ${Number(m.statements.pct || 0).toFixed(2)}% | ${Number(m.branches.pct || 0).toFixed(2)}% | ${Number(m.functions.pct || 0).toFixed(2)}% | ${Number(m.lines.pct || 0).toFixed(2)}% |`;
+ };
+
+ const lines = [];
+ lines.push('### TypeScript combined coverage');
+ lines.push('');
+ lines.push('| Name | Stmts | Miss | Cover | Branch | Funcs | Lines |');
+ lines.push('| --- | ---: | ---: | ---: | ---: | ---: | ---: |');
+ lines.push(row('TOTAL', total));
+ for (const [name, metrics] of files) {
+ lines.push(row(name, metrics));
+ }
+ lines.push('');
+
+ const summaryFile = process.env.GITHUB_STEP_SUMMARY;
+ fs.appendFileSync(summaryFile, lines.join('\n'));
+ NODE
+
+ - name: Fail if TypeScript coverage is <50%
+ run: |
+ cd bubus-ts
+ pnpm dlx c8 report \
+ --temp-directory .v8-coverage-merged \
+ --reporter=text-summary \
+ --exclude-after-remap \
+ -n 'src/**/*.ts' \
+ -x 'src/bridge*.ts' \
+ -x 'src/optional_deps.ts' \
+ --check-coverage \
+ --lines 50 > /dev/null
+
+ - name: Upload merged coverage report
+ id: upload_ts_coverage_report
+ uses: actions/upload-artifact@v4
+ with:
+ name: ts-coverage-report
+ path: |
+ bubus-ts/coverage/
+ pyproject.toml
+ retention-days: 7
+
+ - name: Append TypeScript coverage artifact link
+ run: |
+ echo "" >> "$GITHUB_STEP_SUMMARY"
+ echo "[Download TypeScript HTML coverage artifact (ts-coverage-report)](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}/artifacts/${{ steps.upload_ts_coverage_report.outputs.artifact-id }})" >> "$GITHUB_STEP_SUMMARY"
+
+ perf:
+ needs: coverage
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: bubus-ts
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: pnpm/action-setup@v4
+ with:
+ version: 10
+
+ - uses: actions/setup-node@v4
+ with:
+ node-version: 22
+ cache: pnpm
+ cache-dependency-path: bubus-ts/pnpm-lock.yaml
+
+ - uses: oven-sh/setup-bun@v2
+
+ - uses: denoland/setup-deno@v2
+ with:
+ deno-version: v2.x
+
+ - run: pnpm install --frozen-lockfile
+ - run: npx --yes --package=playwright playwright install chromium
+ - run: pnpm run perf
diff --git a/.gitignore b/.gitignore
index 6d5adec..8960285 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,6 +20,7 @@ CLAUDE.local.md
# Build files
dist/
+node_modules/
# Coverage files
.coverage
@@ -27,7 +28,7 @@ dist/
htmlcov/
coverage.xml
*.cover
-
+*.sqlite*
# Secrets and sensitive files
secrets.env
diff --git a/LICENSE b/LICENSE
index e8bde14..bb828de 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2025 Browser Use
+Copyright (c) 2025 bbus contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index afd7ed8..2d51af3 100644
--- a/README.md
+++ b/README.md
@@ -1,21 +1,53 @@
-# `bubus`: π’ Production-ready event bus library for Python
+# `bubus`: π’ Production-ready multi-language event bus
-Bubus is a fully-featured, Pydantic-powered event bus library for async Python.
+
-It's designed for quickly building event-driven applications with Python in a way that "just works" with async support, proper support for nested events, and real concurrency control.
+[](https://deepwiki.com/pirate/bbus) [](https://pypi.org/project/bubus/) [](https://github.com/pirate/bbus) 
-It provides a [pydantic](https://docs.pydantic.dev/latest/)-based API for implementing publish-subscribe patterns with type safety, async/sync handler support, and advanced features like event forwarding between buses.
+[](https://deepwiki.com/pirate/bbus/3-typescript-implementation) [](https://www.npmjs.com/package/bubus)
-βΎοΈ It's inspired by the simplicity of async and events in `JS`, we aim to bring a fully type-checked [`EventTarget`](https://developer.mozilla.org/en-US/docs/Web/API/EventTarget)-style API to Python.
+Bubus is an in-memory event bus library for async Python and TS (node/browser).
+
+It's designed for quickly building resilient, predictable, complex event-driven apps.
+
+It "just works" with an intuitive but powerful event JSON format + emit API that's consistent across both languages and scales smoothly from one event up to millions (~0.2ms/event):
+
+```python
+class SomeEvent(BaseEvent):
+ some_data: int
+
+def handle_some_event(event: SomeEvent):
+ print('hi!')
+
+bus.on(SomeEvent, handle_some_event)
+await bus.emit(SomeEvent(some_data=132))
+# "hi!"
+```
+
+It's async native, has proper automatic nested event tracking, and powerful concurrency control options. The API is inspired by `EventEmitter` or [`emittery`](https://github.com/sindresorhus/emittery) in JS, but it takes it a step further:
+
+- nice Pydantic / Zod schemas for events that can be exchanged between both languages
+- automatic UUIDv7s and monotonic nanosecond timestamps for ordering events globally
+- built-in locking options to force strict global FIFO processing or fully parallel processing
+
+---
+
+βΎοΈ It's inspired by the simplicity of async and events in `JS` but with baked-in features that eliminate most of the tedious, repetitive complexity in event-driven codebases:
+
+- correct timeout enforcement across multiple levels of events, if a parent times out it correctly aborts all child event processing
+- ability to strongly type hint and enforce the return type of event handlers at compile-time
+- ability to queue events on the bus, or inline await them for immediate execution like a normal function call
+- handles thousands of events/sec/core in both languages; see the runtime matrix below for current measured numbers
+
## π’ Quickstart
Install bubus and get started with a simple event-driven application:
```bash
-pip install bubus
+pip install bubus # see ./bubus-ts/README.md for JS instructions
```
```python
@@ -28,15 +60,15 @@ class UserLoginEvent(BaseEvent[str]):
is_admin: bool
async def handle_login(event: UserLoginEvent) -> str:
- auth_request = await event.event_bus.dispatch(AuthRequestEvent(...)) # nested events supported
- auth_response = await event.event_bus.expect(AuthResponseEvent, timeout=30.0)
+ auth_request = await event.event_bus.emit(AuthRequestEvent(...)) # nested events supported
+ auth_response = await event.event_bus.find(AuthResponseEvent, child_of=auth_request, future=30)
return f"User {event.username} logged in admin={event.is_admin} with API response: {await auth_response.event_result()}"
bus = EventBus()
bus.on(UserLoginEvent, handle_login)
bus.on(AuthRequestEvent, AuthAPI.post)
-event = bus.dispatch(UserLoginEvent(username="alice", is_admin=True))
+event = bus.emit(UserLoginEvent(username="alice", is_admin=True))
print(await event.event_result())
# User alice logged in admin=True with API response: {...}
```
@@ -104,9 +136,9 @@ class SomeService:
return 'this works too'
# All usage patterns behave the same:
-bus.on(SomeEvent, SomeClass().handlers_can_be_methods)
-bus.on(SomeEvent, SomeClass.handler_can_be_classmethods)
-bus.on(SomeEvent, SomeClass.handlers_can_be_staticmethods)
+bus.on(SomeEvent, SomeService().handlers_can_be_methods)
+bus.on(SomeEvent, SomeService.handler_can_be_classmethods)
+bus.on(SomeEvent, SomeService.handlers_can_be_staticmethods)
```
@@ -154,16 +186,40 @@ auth_bus = EventBus(name='AuthBus')
data_bus = EventBus(name='DataBus')
# Share all or specific events between buses
-main_bus.on('*', auth_bus.dispatch) # if main bus gets LoginEvent, will forward to AuthBus
-auth_bus.on('*', data_bus.dispatch) # auth bus will forward everything to DataBus
-data_bus.on('*', main_bus.dispatch) # don't worry! event will only be processed once by each, no infinite loop occurs
+main_bus.on('*', auth_bus.emit) # if main bus gets LoginEvent, will forward to AuthBus
+auth_bus.on('*', data_bus.emit) # auth bus will forward everything to DataBus
+data_bus.on('*', main_bus.emit) # don't worry! event will only be processed once by each, no infinite loop occurs
# Events flow through the hierarchy with tracking
-event = main_bus.dispatch(LoginEvent())
+event = main_bus.emit(LoginEvent())
await event
-print(event.event_path) # ['MainBus', 'AuthBus', 'DataBus'] # list of buses that have already procssed the event
+print(event.event_path) # ['MainBus#ab12', 'AuthBus#cd34', 'DataBus#ef56'] # list of bus labels that already processed the event
+```
+
+
+
+### Bridges
+
+Bridges are optional connectors that let you send/receive events to and from an external service. You don't need a bridge to use bubus — it's normally purely in-memory. Bridges are simple helpers that forward bubus event JSON to storage engines / other processes / other machines; they prevent loops automatically, but beyond that they do only basic forwarding, with no handler pickling or anything fancy.
+
+Bridges all expose a very simple bus-like API with only `.emit()` and `.on()`.
+
+**Example usage: link a bus to a redis pub/sub channel**
+```python
+bridge = RedisEventBridge('redis://redis@localhost:6379')
+
+bus.on('*', bridge.emit) # listen for all events on bus and send them to redis channel
+bridge.on('*', bus.emit) # listen for new events in redis channel and emit them to our bus
```
+- `SocketEventBridge('/tmp/bubus_events.sock')`
+- `HTTPEventBridge(send_to='https://127.0.0.1:8001/bubus_events', listen_on='http://0.0.0.0:8002/bubus_events')`
+- `JSONLEventBridge('/tmp/bubus_events.jsonl')`
+- `SQLiteEventBridge('/tmp/bubus_events.sqlite3')`
+- `PostgresEventBridge('postgresql://user:pass@localhost:5432/dbname/bubus_events')`
+- `RedisEventBridge('redis://user:pass@localhost:6379/1/bubus_events')`
+- `NATSEventBridge('nats://localhost:4222', 'bubus_events')`
+
### π± Event Results Aggregation
@@ -181,7 +237,8 @@ bus.on(GetConfigEvent, load_user_config)
bus.on(GetConfigEvent, load_system_config)
# Get a merger of all dict results
-event = await bus.dispatch(GetConfigEvent())
+# (conflicting keys raise ValueError unless raise_if_conflicts=False)
+event = await bus.emit(GetConfigEvent())
config = await event.event_results_flat_dict(raise_if_conflicts=False)
# {'debug': False, 'port': 8080, 'timeout': 30}
@@ -197,22 +254,22 @@ await event.event_results_list()
Events are processed in strict FIFO order, maintaining consistency:
```python
-# Events are processed in the order they were dispatched
+# Events are processed in the order they were emitted
for i in range(10):
- bus.dispatch(ProcessTaskEvent(task_id=i))
+ bus.emit(ProcessTaskEvent(task_id=i))
# Even with async handlers, order is preserved
await bus.wait_until_idle(timeout=30.0)
```
-If a handler dispatches and awaits any child events during execution, those events will jump the FIFO queue and be processed immediately:
+If a handler emits and awaits any child events during execution, those events will jump the FIFO queue and be processed immediately:
```python
def child_handler(event: SomeOtherEvent) -> str:
return 'xzy123'
def main_handler(event: MainEvent) -> str:
# enqueue event for processing after main_handler exits
- child_event = bus.dispatch(SomeOtherEvent())
+ child_event = bus.emit(SomeOtherEvent())
# can also await child events to process immediately instead of adding to FIFO queue
completed_child_event = await child_event
@@ -221,13 +278,13 @@ def main_handler(event: MainEvent) -> str:
bus.on(SomeOtherEvent, child_handler)
bus.on(MainEvent, main_handler)
-await bus.dispatch(MainEvent()).event_result()
+await bus.emit(MainEvent()).event_result()
# result from awaiting child event: xyz123
```
-### πͺ Dispatch Nested Child Events From Handlers
+### πͺ Emit Nested Child Events From Handlers
Automatically track event relationships and causality tree:
@@ -235,15 +292,15 @@ Automatically track event relationships and causality tree:
async def parent_handler(event: BaseEvent):
# handlers can emit more events to be processed asynchronously after this handler completes
child = ChildEvent()
- child_event_async = event.event_bus.dispatch(child) # equivalent to bus.dispatch(...)
+ child_event_async = event.event_bus.emit(child) # equivalent to bus.emit(...)
assert child.event_status != 'completed'
assert child_event_async.event_parent_id == event.event_id
await child_event_async
- # or you can dispatch an event and block until it finishes processing by awaiting the event
+ # or you can emit an event and block until it finishes processing by awaiting the event
# this recursively waits for all handlers, including if event is forwarded to other buses
# (note: awaiting an event from inside a handler jumps the FIFO queue and will process it immediately, before any other pending events)
- child_event_sync = await bus.dispatch(ChildEvent())
+ child_event_sync = await bus.emit(ChildEvent())
# ChildEvent handlers run immediately
assert child_event_sync.event_status == 'completed'
@@ -254,7 +311,7 @@ async def run_main():
bus.on(ChildEvent, child_handler)
bus.on(ParentEvent, parent_handler)
- parent_event = bus.dispatch(ParentEvent())
+ parent_event = bus.emit(ParentEvent())
print(parent_event.event_children) # show all the child events emitted during handling of an event
await parent_event
print(bus.log_tree())
@@ -270,47 +327,77 @@ if __name__ == '__main__':
-### β³ Expect an Event to be Dispatched
+### π Find Events in History or Wait for Future Events
-Wait for specific events to be seen on a bus with optional filtering:
+`find()` is the single lookup API: search history, wait for future events, or combine both.
```python
-# Block until a specific event is seen (with optional timeout)
-request_event = await bus.dispatch(RequestEvent(id=123, table='invoices', request_id=999234))
-response_event = await bus.expect(ResponseEvent, timeout=30)
+# Default: non-blocking history lookup (past=True, future=False)
+existing = await bus.find(ResponseEvent)
+
+# Wait only for future matches
+future = await bus.find(ResponseEvent, past=False, future=5)
+
+# Combine event predicate + event metadata filters
+match = await bus.find(
+ ResponseEvent,
+ where=lambda e: e.request_id == my_id,
+ event_status='completed',
+ future=5,
+)
+
+# Wildcard: match any event type, filtered by metadata/predicate
+any_completed = await bus.find(
+ '*',
+ where=lambda e: e.event_type.endswith('ResultEvent'),
+ event_status='completed',
+ future=5,
+)
```
-A more complex real-world example showing off all the features:
+#### Finding Child Events
+
+When you emit an event that triggers child events, use `child_of` to find specific descendants:
```python
-async def on_generate_invoice_pdf(event: GenerateInvoiceEvent) -> pdf:
- request_event = await bus.dispatch(APIRequestEvent( # example: fire a backend request via some RPC client using bubus
- method='invoices.generatePdf',
- invoice_id=event.invoice_id,
- request_id=uuid4(),
- ))
- # ...rpc client should send the request, then call event_bus.dispatch(APIResponseEvent(...)) when it gets a response ...
+# Emit a parent event that triggers child events
+nav_event = await bus.emit(NavigateToUrlEvent(url="https://example.com"))
- # wait for the response event to be fired by the RPC client
- is_our_response = lambda response_event: response_event.request_id == request_event.request_id
- is_succesful = lambda response_event: response_event.invoice_id == event.invoice_id and response_event.invoice_url
- try:
- response_event: APIResponseEvent = await bus.expect(
- APIResponseEvent, # wait for events of this type (also accepts str name)
- include=lambda e: is_our_response(e) and is_succesful(e), # only include events that match a certain filter func
- exclude=lambda e: e.status != 'retrying', # optionally exclude certain events, overrides include
- timeout=30, # raises asyncio.TimeoutError if no match is seen within 30sec
- )
- except TimeoutError:
- await bus.dispatch(TimedOutError(msg='timed out while waiting for response from server', request_id=request_event.id))
+# Find a child event (already fired while NavigateToUrlEvent was being handled)
+new_tab = await bus.find(TabCreatedEvent, child_of=nav_event, past=5)
+if new_tab:
+ print(f"New tab created: {new_tab.tab_id}")
+```
- return response_event.invoice_url
+This solves race conditions where child events fire before you start waiting for them.
-event_bus.on(GenerateInvoiceEvent, on_generate_invoice_pdf)
-```
+See the `EventBus.find(...)` API section below for full parameter details.
> [!IMPORTANT]
-> `expect()` resolves when the event is first *dispatched* to the `EventBus`, not when it completes. `await response_event` to get the completed event.
+> `find()` resolves when the event is first *emitted* to the `EventBus`, not when it completes. Use `await event` to wait for handlers to finish.
+> If no match is found (or future timeout elapses), `find()` returns `None`.
+
+
+
+### π Event Debouncing
+
+Avoid re-running expensive work by reusing recent events. The `find()` method makes debouncing simple:
+
+```python
+# Simple debouncing: reuse event from last 10 seconds, or emit new
+event = await (
+ await bus.find(ScreenshotEvent, past=10, future=False) # Check last 10s of history (instant)
+ or bus.emit(ScreenshotEvent())
+)
+
+# Advanced: check history, wait briefly for new event to appear, fallback to emit new event
+event = (
+ await bus.find(SyncEvent, past=True, future=False) # Check all history (instant)
+ or await bus.find(SyncEvent, past=False, future=5) # Wait up to 5s for in-flight
+ or bus.emit(SyncEvent()) # Fallback: emit new
+)
+await event # get completed event
+```
@@ -321,34 +408,38 @@ There are two ways to get return values from event handlers:
**1. Have handlers return their values directly, which puts them in `event.event_results`:**
```python
-class DoSomeMathEvent(BaseEvent[int]): # BaseEvent[int] = expect int returned from all event handlers
+class DoSomeMathEvent(BaseEvent[int]): # BaseEvent[int] = handlers are validated as returning int
a: int
b: int
+ # int passed above gets saved to:
+ # event_result_type = int
+
def do_some_math(event: DoSomeMathEvent) -> int:
return event.a + event.b
event_bus.on(DoSomeMathEvent, do_some_math)
-print(await event_bus.dispatch(DoSomeMathEvent(a=100, b=120)).event_result())
+print(await event_bus.emit(DoSomeMathEvent(a=100, b=120)).event_result())
# 220
```
You can use these helpers to interact with the results returned by handlers:
+
- `BaseEvent.event_result()`
- `BaseEvent.event_results_list()`, `BaseEvent.event_results_filtered()`
- `BaseEvent.event_results_by_handler_id()`, `BaseEvent.event_results_by_handler_name()`
- `BaseEvent.event_results_flat_list()`, `BaseEvent.event_results_flat_dict()`
-**2. Have the handler do the work, then dispatch another event containing the result value, which other code can expect:**
+**2. Have the handler do the work, then emit another event containing the result value, which other code can find:**
```python
def do_some_math(event: DoSomeMathEvent[int]) -> int:
result = event.a + event.b
- event.event_bus.dispatch(MathCompleteEvent(final_sum=result))
+ event.event_bus.emit(MathCompleteEvent(final_sum=result))
event_bus.on(DoSomeMathEvent, do_some_math)
-await event_bus.dispatch(DoSomeMathEvent(a=100, b=120))
-result_event = await event_bus.expect(MathCompleteEvent)
+await event_bus.emit(DoSomeMathEvent(a=100, b=120))
+result_event = await event_bus.find(MathCompleteEvent, past=False, future=30)
print(result_event.final_sum)
# 220
```
@@ -370,7 +461,7 @@ async def on_ScreenshotEvent(event: ScreenshotEvent) -> bytes:
event_bus.on(ScreenshotEvent, on_ScreenshotEvent)
# Handler return values are automatically validated against the bytes type
-returned_bytes = await event_bus.dispatch(ScreenshotEvent(...)).event_result()
+returned_bytes = await event_bus.emit(ScreenshotEvent(...)).event_result()
assert isinstance(returned_bytes, bytes)
```
@@ -407,9 +498,74 @@ async def fetch_from_gmail(event: FetchInboxEvent) -> list[EmailMessage]:
event_bus.on(FetchInboxEvent, fetch_from_gmail)
# Return values are automatically validated as list[EmailMessage]
-email_list = await event_bus.dispatch(FetchInboxEvent(account_id='124', ...)).event_result()
+email_list = await event_bus.emit(FetchInboxEvent(account_id='124', ...)).event_result()
+```
+
+For pure Python usage, `event_result_type` can be any Python/Pydantic type you want. For cross-language JSON roundtrips, object-like shapes (e.g. `TypedDict`, `dataclass`, model-like dict schemas) rehydrate on Python as Pydantic models, map keys are constrained to JSON object string keys, and fine-grained string constraints/custom field validator logic is not preserved.
+
+
+
+### π§΅ ContextVar Propagation
+
+ContextVars set before `emit()` are automatically propagated to event handlers. This is essential for request-scoped context like request IDs, user sessions, or tracing spans:
+
+```python
+from contextvars import ContextVar
+
+# Define your context variables
+request_id: ContextVar[str] = ContextVar('request_id', default='')
+user_id: ContextVar[str] = ContextVar('user_id', default='')
+
+async def handler(event: MyEvent) -> str:
+ # Handler sees the context values that were set before emit()
+ print(f"Request: {request_id.get()}, User: {user_id.get()}")
+ return "done"
+
+bus.on(MyEvent, handler)
+
+# Set context before emit (e.g., in FastAPI middleware)
+request_id.set('req-12345')
+user_id.set('user-abc')
+
+# Handler will see request_id='req-12345' and user_id='user-abc'
+await bus.emit(MyEvent())
+```
+
+**Context propagates through nested handlers:**
+
+```python
+async def parent_handler(event: ParentEvent) -> str:
+ # Context is captured at emit time
+ print(f"Parent sees: {request_id.get()}") # 'req-12345'
+
+ # Child events inherit the same context
+ await bus.emit(ChildEvent())
+ return "parent_done"
+
+async def child_handler(event: ChildEvent) -> str:
+ # Child also sees the original emit context
+ print(f"Child sees: {request_id.get()}") # 'req-12345'
+ return "child_done"
+```
+
+**Context isolation between emits:**
+
+Each emit captures its own context snapshot. Concurrent emits with different context values are properly isolated:
+
+```python
+request_id.set('req-A')
+event_a = bus.emit(MyEvent()) # Handler A sees 'req-A'
+
+request_id.set('req-B')
+event_b = bus.emit(MyEvent()) # Handler B sees 'req-B'
+
+await event_a # Still sees 'req-A'
+await event_b # Still sees 'req-B'
```
+> [!NOTE]
+> Context is captured at `emit()` time, not when the handler executes. This ensures handlers see the context from the call site, even if the event is processed later from a queue.
+
### π§Ή Memory Management
@@ -422,10 +578,18 @@ bus = EventBus(max_history_size=100) # Keep max 100 events in history
# Or disable memory limits for unlimited history
bus = EventBus(max_history_size=None)
+
+# Or keep only in-flight events in history (drop each event as soon as it completes)
+bus = EventBus(max_history_size=0)
+
+# Or reject new emits when history is full (instead of dropping old history)
+bus = EventBus(max_history_size=100, max_history_drop=False)
```
**Automatic Cleanup:**
-- When `max_history_size` is set, EventBus automatically removes old events when the limit is exceeded
+- When `max_history_size` is set and `max_history_drop=True`, EventBus removes old events when the limit is exceeded
+- If `max_history_size=0`, history keeps only pending/started events and drops each event immediately after completion
+- If `max_history_drop=True`, the bus may drop oldest history entries even if they are uncompleted events
- Completed events are removed first (oldest first), then started events, then pending events
- This ensures active events are preserved while cleaning up old completed events
@@ -459,36 +623,75 @@ The harsh tradeoff is less deterministic ordering as handler execution order wil
```python
# Create bus with parallel handler execution
-bus = EventBus(parallel_handlers=True)
+bus = EventBus(event_handler_concurrency='parallel')
# Multiple handlers run concurrently for each event
bus.on('DataEvent', slow_handler_1) # Takes 1 second
bus.on('DataEvent', slow_handler_2) # Takes 1 second
start = time.time()
-await bus.dispatch(DataEvent())
+await bus.emit(DataEvent())
# Total time: ~1 second (not 2)
```
-### π Write-Ahead Logging
+### π§© Middlwares
-Persist events automatically to a `jsonl` file for future replay and debugging:
+Middlewares can observe or mutate the `EventResult` at each step, emit additional events, or trigger other side effects (metrics, retries, auth checks, etc.).
```python
-# Enable WAL event log persistence (optional)
-bus = EventBus(name='MyBus', wal_path='./events.jsonl')
+from bubus import EventBus
+from bubus.middlewares import LoggerEventBusMiddleware, WALEventBusMiddleware, SQLiteHistoryMirrorMiddleware, OtelTracingMiddleware
+
+bus = EventBus(
+ name='MyBus',
+ middlewares=[
+ SQLiteHistoryMirrorMiddleware('./events.sqlite3'),
+ WALEventBusMiddleware('./events.jsonl'),
+ LoggerEventBusMiddleware('./events.log'),
+ OtelTracingMiddleware(),
+ # ...
+ ],
+)
-# All completed events are automatically appended as JSON lines to the end
-bus.dispatch(SecondEventAbc(some_key="banana"))
+await bus.emit(SecondEventAbc(some_key="banana"))
+# will persist all events to sqlite + events.jsonl + events.log
```
-`./events.jsonl`:
-```json
-{"event_type": "FirstEventXyz", "event_created_at": "2025-07-10T20:39:56.462000+00:00", "some_key": "some_val", ...}
-{"event_type": "SecondEventAbc", ..., "some_key": "banana"}
-...
+Built-in middlewares you can import from `bubus.middlewares.*`:
+
+- `AutoErrorEventMiddleware`: on handler error, fire-and-forget emits `OriginalEventTypeErrorEvent` with `{error, error_type}` (skips `*ErrorEvent`/`*ResultEvent` sources). Useful when downstream/remote consumers only see events and need explicit failure notifications.
+- `AutoReturnEventMiddleware`: on non-`None` handler return, fire-and-forget emits `OriginalEventTypeResultEvent` with `{data}` (skips `*ErrorEvent`/`*ResultEvent` sources). Useful for bridges/remote systems since handler return values do not cross bridge boundaries, but events do.
+- `AutoHandlerChangeEventMiddleware`: emits `BusHandlerRegisteredEvent({handler})` / `BusHandlerUnregisteredEvent({handler})` when handlers are added/removed via `.on()` / `.off()`.
+- `OtelTracingMiddleware`: emits OpenTelemetry spans for events and handlers with parent-child linking; can be exported to Sentry via Sentry's OpenTelemetry integration.
+- `WALEventBusMiddleware`: persists completed events to JSONL for replay/debugging.
+- `LoggerEventBusMiddleware`: writes event/handler transitions to stdout and optionally to file.
+- `SQLiteHistoryMirrorMiddleware`: mirrors event and handler snapshots into append-only SQLite `events_log` and `event_results_log` tables for auditing/debugging.
+
+#### Defining a custom middleware
+
+Handler middlewares subclass `EventBusMiddleware` and override whichever lifecycle hooks they need (`on_event_change`, `on_event_result_change`, `on_handler_change`):
+
+```python
+from bubus.middlewares import EventBusMiddleware
+
+class AnalyticsMiddleware(EventBusMiddleware):
+ async def on_event_result_change(self, eventbus, event, event_result, status):
+ if status == 'started':
+ await analytics_bus.emit(HandlerStartedAnalyticsEvent(event_id=event_result.event_id))
+ elif status == 'completed':
+ await analytics_bus.emit(
+ HandlerCompletedAnalyticsEvent(
+ event_id=event_result.event_id,
+ error=repr(event_result.error) if event_result.error else None,
+ )
+ )
+
+ async def on_handler_change(self, eventbus, handler, registered):
+ await analytics_bus.emit(
+ HandlerRegistryChangedEvent(handler_id=handler.id, registered=registered, bus=eventbus.name)
+ )
```
@@ -507,18 +710,34 @@ The main event bus class that manages event processing and handler execution.
```python
EventBus(
name: str | None = None,
- wal_path: Path | str | None = None,
- parallel_handlers: bool = False,
- max_history_size: int | None = 50
+ event_handler_concurrency: Literal['serial', 'parallel'] = 'serial',
+ event_handler_completion: Literal['all', 'first'] = 'all',
+ event_timeout: float | None = 60.0,
+ event_slow_timeout: float | None = 300.0,
+ event_handler_slow_timeout: float | None = 30.0,
+ event_handler_detect_file_paths: bool = True,
+ max_history_size: int | None = 50,
+ max_history_drop: bool = False,
+ middlewares: Sequence[EventBusMiddleware | type[EventBusMiddleware]] | None = None,
)
```
**Parameters:**
- `name`: Optional unique name for the bus (auto-generated if not provided)
-- `wal_path`: Path for write-ahead logging of events to a `jsonl` file (optional)
-- `parallel_handlers`: If `True`, handlers run concurrently for each event, otherwise serially if `False` (the default)
-- `max_history_size`: Maximum number of events to keep in history (default: 50, None = unlimited)
+- `event_handler_concurrency`: Default handler execution mode for events on this bus: `'serial'` (default) or `'parallel'` (copied onto `event.event_handler_concurrency` at emit time unless the event sets its own value)
+- `event_handler_completion`: Handler completion mode for each event: `'all'` (default, wait for all handlers) or `'first'` (complete once first successful non-`None` result is available)
+- `event_timeout`: Default per-event timeout in seconds applied at emit time when `event.event_timeout` is `None`
+- `event_slow_timeout`: Default slow-event warning threshold in seconds
+- `event_handler_slow_timeout`: Default slow-handler warning threshold in seconds
+- `event_handler_detect_file_paths`: Whether to auto-detect handler source file paths at registration time (slightly slower when enabled)
+- `max_history_size`: Maximum number of events to keep in history (default: 50, `None` = unlimited, `0` = keep only in-flight events and drop completed events immediately)
+- `max_history_drop`: If `True`, drop oldest history entries when full (even uncompleted events). If `False` (default), reject new emits once history reaches `max_history_size` (except when `max_history_size=0`, which never rejects on history size)
+- `middlewares`: Optional list of `EventBusMiddleware` subclasses or instances that hook into handler execution for analytics, logging, retries, etc. (see [Middlewares](#middlewares) for more info)
+
+Timeout precedence matches TS:
+- Effective handler timeout = `min(resolved_handler_timeout, event_timeout)` where `resolved_handler_timeout` resolves in order: `handler.handler_timeout` -> `event.event_handler_timeout` -> `bus.event_timeout`.
+- Slow handler warning threshold resolves in order: `handler.handler_slow_timeout` -> `event.event_handler_slow_timeout` -> `event.event_slow_timeout` -> `bus.event_handler_slow_timeout` -> `bus.event_slow_timeout`.
#### `EventBus` Properties
@@ -530,7 +749,6 @@ EventBus(
- `events_completed`: List of completed events
- `all_instances`: Class-level WeakSet tracking all active EventBus instances (for memory monitoring)
-
#### `EventBus` Methods
##### `on(event_type: str | Type[BaseEvent], handler: Callable)`
@@ -543,30 +761,76 @@ bus.on(UserEvent, handler_func) # By event class
bus.on('*', handler_func) # Wildcard - all events
```
-##### `dispatch(event: BaseEvent) -> BaseEvent`
+##### `emit(event: BaseEvent) -> BaseEvent`
Enqueue an event for processing and return the pending `Event` immediately (synchronous).
```python
-event = bus.dispatch(MyEvent(data="test"))
+event = bus.emit(MyEvent(data="test"))
result = await event # await the pending Event to get the completed Event
```
-**Note:** When `max_history_size` is set, EventBus enforces a hard limit of 100 pending events (queue + processing) to prevent runaway memory usage. Dispatch will raise `RuntimeError` if this limit is exceeded.
+**Note:** Queueing is unbounded. History pressure is controlled by `max_history_size` + `max_history_drop`:
+
+- `max_history_drop=True`: absorb new events and trim old history entries (even uncompleted events).
+- `max_history_drop=False`: raise `RuntimeError` when history is full.
+- `max_history_size=0`: keep pending/in-flight events only; completed events are immediately removed from history.
+
+##### `find(event_type: str | Literal['*'] | Type[BaseEvent], *, where: Callable[[BaseEvent], bool]=None, child_of: BaseEvent | None=None, past: bool | float | timedelta=True, future: bool | float=False, **event_fields) -> BaseEvent | None`
+
+Find an event matching criteria in history and/or future. This is the recommended unified method for event lookup.
-##### `expect(event_type: str | Type[BaseEvent], timeout: float | None=None, predicate: Callable[[BaseEvent], bool]=None) -> BaseEvent`
+**Parameters:**
-Wait for a specific event to occur.
+- `event_type`: The event type string, `'*'` wildcard, or model class to find
+- `where`: Predicate function for filtering (default: matches all)
+- `child_of`: Only match events that are descendants of this parent event
+- `past`: Controls history search behavior (default: `True`)
+ - `True`: search all history
+ - `False`: skip history search
+ - `float`/`timedelta`: search events from last N seconds only
+- `future`: Controls future wait behavior (default: `False`)
+ - `True`: wait forever for matching event
+ - `False`: don't wait for future events
+ - `float`: wait up to N seconds for matching event
+- `**event_fields`: Optional equality filters for any event fields (for example `event_status='completed'`, `user_id='u-1'`)
```python
-# Wait for any UserEvent
-event = await bus.expect('UserEvent', timeout=30)
+# Default call is non-blocking history lookup (past=True, future=False)
+event = await bus.find(ResponseEvent)
-# Wait with custom filter
-event = await bus.expect(
- 'UserEvent',
- predicate=lambda e: e.user_id == 'specific_user'
-)
+# Find child of a specific parent event
+child = await bus.find(ChildEvent, child_of=parent_event, future=5)
+
+# Wait only for future events (ignore history)
+event = await bus.find(ResponseEvent, past=False, future=5)
+
+# Search recent history + optionally wait
+event = await bus.find(ResponseEvent, past=5, future=5)
+
+# Filter by event metadata
+completed = await bus.find(ResponseEvent, event_status='completed')
+
+# Wildcard match across all event types
+any_completed = await bus.find('*', event_status='completed', past=True, future=False)
+```
+
+##### `event_is_child_of(event: BaseEvent, ancestor: BaseEvent) -> bool`
+
+Check if event is a descendant of ancestor (child, grandchild, etc.).
+
+```python
+if bus.event_is_child_of(child_event, parent_event):
+ print("child_event is a descendant of parent_event")
+```
+
+##### `event_is_parent_of(event: BaseEvent, descendant: BaseEvent) -> bool`
+
+Check if event is an ancestor of descendant (parent, grandparent, etc.).
+
+```python
+if bus.event_is_parent_of(parent_event, child_event):
+ print("parent_event is an ancestor of child_event")
```
##### `wait_until_idle(timeout: float | None=None)`
@@ -603,36 +867,36 @@ Make sure none of your own event data fields start with `event_` or `model_` to
T_EventResultType = TypeVar('T_EventResultType', bound=Any, default=None)
class BaseEvent(BaseModel, Generic[T_EventResultType]):
- # Framework-managed fields
- event_type: str # Defaults to class name
+ # special config fields
event_id: str # Unique UUID7 identifier, auto-generated if not provided
- event_timeout: float = 60.0 # Maximum execution in seconds for each handler
- event_schema: str # Module.Class@version (auto-set based on class & LIBRARY_VERSION env var)
- event_parent_id: str # Parent event ID (auto-set)
- event_path: list[str] # List of bus names traversed (auto-set)
- event_created_at: datetime # When event was created, auto-generated
- event_results: dict[str, EventResult] # Handler results
- event_result_type: type[T_EventResultType] | None # Auto-detected from Generic[T] parameter
+ event_type: str # Defaults to class name e.g. 'BaseEvent'
+ event_result_type: Any | None # Pydantic model/python type to validate handler return values, defaults to T_EventResultType
+ event_version: str # Defaults to '0.0.1' (override per class/instance for event payload versioning)
+ event_timeout: float | None = None # Event timeout in seconds (bus default applied at emit time if None)
+ event_handler_timeout: float | None = None # Optional per-event handler timeout cap in seconds
+ event_handler_slow_timeout: float | None = None # Optional per-event slow-handler warning threshold
+ event_handler_concurrency: Literal['serial', 'parallel'] = 'serial' # handler scheduling strategy for this event
+ event_handler_completion: Literal['all', 'first'] = 'all' # completion strategy for this event's handlers
+
+ # runtime state fields
+ event_status: Literal['pending', 'started', 'completed'] # event processing status (auto-set)
+ event_created_at: datetime # When event was created, auto-generated (auto-set)
+ event_started_at: datetime | None # When first handler started executing during event processing (auto-set)
+ event_completed_at: datetime | None # When all event handlers finished processing (auto-set)
+ event_parent_id: str | None # Parent event ID that led to this event during handling (auto-set)
+ event_path: list[str] # List of bus labels traversed, e.g. BusName#ab12 (auto-set)
+ event_results: dict[str, EventResult] # Handler results {: EventResult} (auto-set)
+ event_children: list[BaseEvent] # getter property to list any child events emitted during handling
+ event_bus: EventBus # getter property to get the bus the event was emitted on
- # Data fields
- # ... subclass BaseEvent to add your own event data fields here ...
+ # payload fields
+ # ... subclass BaseEvent to add your own event payload fields here ...
# some_key: str
# some_other_key: dict[str, int]
# ...
+ # (they should not start with event_* to avoid conflict with special built-in fields)
```
-`event.event_results` contains a dict of pending `EventResult` objects that will be completed once handlers finish executing.
-
-
-#### `BaseEvent` Properties
-
-- `event_status`: `Literal['pending', 'started', 'complete']` Event status
-- `event_started_at`: `datetime` When first handler started processing
-- `event_completed_at`: `datetime` When all handlers completed processing
-- `event_children`: `list[BaseEvent]` Get any child events emitted during handling of this event
-- `event_bus`: `EventBus` Shortcut to get the bus currently processing this event
-- `event_result_type`: `type[Any] | None` Expected handler return type (auto-detected from `BaseEvent[T]` generic parameter)
-
#### `BaseEvent` Methods
##### `await event`
@@ -640,13 +904,31 @@ class BaseEvent(BaseModel, Generic[T_EventResultType]):
Await the `Event` object directly to get the completed `Event` object once all handlers have finished executing.
```python
-event = bus.dispatch(MyEvent())
+event = bus.emit(MyEvent())
completed_event = await event
raw_result_values = [(await event_result) for event_result in completed_event.event_results.values()]
# equivalent to: completed_event.event_results_list() (see below)
```
+##### `first(timeout: float | None=None, *, raise_if_any: bool=False, raise_if_none: bool=False) -> Any`
+
+Set `event_handler_completion='first'`, wait for completion, and return the first successful non-`None` handler result.
+
+```python
+event = bus.emit(MyEvent())
+value = await event.first()
+```
+
+##### `reset() -> Self`
+
+Return a fresh event copy with runtime processing state reset back to pending.
+
+- Intended for re-emitting an already-seen event as a fresh event (for example after crossing a bridge boundary).
+- The original event object is not mutated, it returns a new copy with some fields reset.
+- A new UUIDv7 `event_id` is generated for the returned copy (a new unique UUID is required so it can be processed as a separate event)
+- Runtime completion state is cleared (`event_results`, completion signal/flags, processed timestamp, emit context).
+
##### `event_result(timeout: float | None=None, include: EventResultFilter=None, raise_if_any: bool=True, raise_if_none: bool=True) -> Any`
Utility method helper to execute all the handlers and return the first handler's raw result value.
@@ -762,6 +1044,17 @@ long_lists = await event.event_results_flat_list(include=lambda r: isinstance(r.
all_items = await event.event_results_flat_list(raise_if_any=False, raise_if_none=False)
```
+##### `event_create_pending_results(handlers: dict[str, EventHandler], eventbus: EventBus | None = None, timeout: float | None = None) -> dict[str, EventResult]`
+
+Create (or reset) the `EventResult` placeholders for the provided handlers. The `EventBus` uses this internally before it begins executing handlers so that the event's state is immediately visible. Advanced users can call it when coordinating handler execution manually.
+
+```python
+applicable_handlers = bus._get_applicable_handlers(event) # internal helper shown for illustration
+pending_results = event.event_create_pending_results(applicable_handlers, eventbus=bus)
+
+assert all(result.status == 'pending' for result in pending_results.values())
+```
+
##### `event_bus` (property)
Shortcut to get the `EventBus` that is currently processing this event. Can be used to avoid having to pass an `EventBus` instance to your handlers.
@@ -770,14 +1063,13 @@ Shortcut to get the `EventBus` that is currently processing this event. Can be u
bus = EventBus()
async def some_handler(event: MyEvent):
- # You can always dispatch directly to any bus you have a reference to
- child_event = bus.dispatch(ChildEvent())
+ # You can always emit directly to any bus you have a reference to
+ child_event = bus.emit(ChildEvent())
# OR use the event.event_bus shortcut to get the current bus:
- child_event = await event.event_bus.dispatch(ChildEvent())
+ child_event = await event.event_bus.emit(ChildEvent())
```
-
---
### `EventResult`
@@ -785,7 +1077,7 @@ async def some_handler(event: MyEvent):
The placeholder object that represents the pending result from a single handler executing an event.
`Event.event_results` contains a `dict[PythonIdStr, EventResult]` in the shape of `{handler_id: EventResult()}`.
-You shouldn't need to ever directly use this class, it's an internal wrapper to track pending and completed results from each handler within `BaseEvent.event_results`.
+You generally won't interact with this class directly — the bus instantiates and updates it for you — but its API is documented here for advanced integrations and custom emit loops.
#### `EventResult` Fields
@@ -799,12 +1091,12 @@ class EventResult(BaseModel):
status: str # 'pending', 'started', 'completed', 'error'
result: Any # Handler return value
- error: str | None # Error message if failed
+ error: BaseException | None # Captured exception if the handler failed
- started_at: datetime # When handler started
- completed_at: datetime # When handler completed
- timeout: float # Handler timeout in seconds
- child_events: list[BaseEvent] # list of child events emitted during handler execution
+ started_at: datetime | None # When handler started
+ completed_at: datetime | None # When handler completed
+ timeout: float | None # Handler timeout in seconds
+ event_children: list[BaseEvent] # child events emitted during handler execution
```
#### `EventResult` Methods
@@ -818,32 +1110,76 @@ handler_result = event.event_results['handler_id']
value = await handler_result # Returns result or raises an exception if handler hits an error
```
+- `execute(event, handler, *, eventbus, timeout, enter_handler_context, exit_handler_context, format_exception_for_log)`
+ Low-level helper that runs the handler, updates timing/status fields, captures errors, and notifies its completion signal. `EventBus.execute_handler()` delegates to this; you generally only need it when building a custom bus or integrating the event system into another emitter runtime.
+
+### `EventHandler`
+
+Serializable metadata wrapper around a registered handler callable.
+
+You usually get an `EventHandler` back from `bus.on(...)`, can pass it to `bus.off(...)`, and may see it in middleware hooks like `on_handler_change(...)`.
+
+#### `EventHandler` Fields
+
+```python
+class EventHandler(BaseModel):
+ id: str # Stable handler identifier
+ handler_name: str # Callable name
+ handler_file_path: str | None # Source file path (if known)
+ handler_timeout: float | None # Optional per-handler timeout override
+ handler_slow_timeout: float | None # Optional "slow handler" threshold
+ handler_registered_at: datetime # Registration timestamp (datetime)
+ handler_registered_ts: int # Registration timestamp (ns epoch)
+ event_pattern: str # Registered event pattern (type name or '*')
+ eventbus_name: str # Owning EventBus name
+ eventbus_id: str # Owning EventBus ID
+```
+
+The raw callable is stored on `handler`, but is excluded from JSON serialization (`to_json_dict()`).
+
+#### `EventHandler` Properties and Methods
+
+- `label` (property): Short display label like `my_handler#abcd`.
+- `__call__(event)`: Invokes the wrapped callable directly.
+- `to_json_dict() -> dict[str, Any]`: JSON-safe metadata dump (excludes callable).
+- `from_json_dict(data, handler=None) -> EventHandler`: Rebuilds metadata; optional callable reattachment.
+- `from_callable(...) -> EventHandler`: Build a new handler entry from a callable plus bus/pattern metadata.
+
---
## π§΅ Advanced Concurrency Control
+### `EventBus`, `BaseEvent`, and `EventHandler` concurrency config fields
+
+These options can be set as bus-level defaults, event-level options, or as handler-specific options.
+They control the concurrency of how events are processed within a bus, across all busses, and how handlers execute within a single event.
+
+- `event_concurrency`: `'global-serial' | 'bus-serial' | 'parallel'` controls event-level scheduling (`None` on events defers to bus default)
+- `event_handler_concurrency`: `'serial' | 'parallel'` should handlers on a single event run in parallel or in sequential order
+- `event_handler_completion`: `'all' | 'first'` should all handlers run, or should we stop handler execution once any handler returns a non-`None` value
+
### `@retry` Decorator
-The `@retry` decorator provides automatic retry functionality with built-in concurrency control for any function, including event handlers. This is particularly useful when handlers interact with external services that may temporarily fail.
+The `@retry` decorator provides automatic retry functionality with built-in concurrency control for any function, including event handlers. This is particularly useful when handlers interact with external services that may temporarily fail. It can be used completely independently from the rest of the library; it does not require a bus and can be used more generally to control concurrency/timeouts/retries of any Python function.
```python
from bubus import EventBus, BaseEvent
-from bubus.helpers import retry
+from bubus.retry import retry
bus = EventBus()
-class FetchDataEvent(BaseEvent):
+class FetchDataEvent(BaseEvent[dict[str, Any]]):
url: str
@retry(
- wait=2, # Wait 2 seconds between retries
- retries=3, # Retry up to 3 times after initial failure
+ retry_after=2, # Wait 2 seconds between retries
+ max_attempts=3, # Total attempts including initial call
timeout=5, # Each attempt times out after 5 seconds
semaphore_limit=5, # Max 5 concurrent executions
- backoff_factor=1.5, # Exponential backoff: 2s, 3s, 4.5s
- retry_on=(TimeoutError, ConnectionError) # Only retry on specific exceptions
+ retry_backoff_factor=1.5, # Exponential backoff: 2s, 3s, 4.5s
+ retry_on_errors=[TimeoutError, ConnectionError], # Only retry on specific exceptions
)
-async def fetch_with_retry(event: FetchDataEvent):
+async def fetch_with_retry(event: FetchDataEvent) -> dict[str, Any]:
# This handler will automatically retry on network failures
async with aiohttp.ClientSession() as session:
async with session.get(event.url) as response:
@@ -854,16 +1190,16 @@ bus.on(FetchDataEvent, fetch_with_retry)
#### Retry Parameters
-- **`timeout`**: Maximum amount of time function is allowed to take per attempt, in seconds (default: 5)
-- **`retries`**: Number of additional retry attempts if function raises an exception (default: 3)
-- **`retry_on`**: Tuple of exception types to retry on (default: `None` = retry on any `Exception`)
-- **`wait`**: Base seconds to wait between retries (default: 3)
-- **`backoff_factor`**: Multiplier for wait time after each retry (default: 1.0)
+- **`timeout`**: Maximum amount of time function is allowed to take per attempt, in seconds (`None` = unbounded, default: `None`)
+- **`max_attempts`**: Total attempts including the first attempt (minimum effective value: `1`, default: `1`)
+- **`retry_on_errors`**: List of exception classes or compiled regex matchers. Regexes are matched against `f"{err.__class__.__name__}: {err}"` (default: `None` = retry on any `Exception`)
+- **`retry_after`**: Base seconds to wait between retries (default: 0)
+- **`retry_backoff_factor`**: Multiplier for wait time after each retry (default: 1.0)
- **`semaphore_limit`**: Maximum number of concurrent calls that can run at the same time
-- **`semaphore_scope`**: Scope for the semaphore: `class`, `self`, `global`, or `multiprocess`
-- **`semaphore_timeout`**: Maximum time to wait for a semaphore slot before proceeding or failing
+- **`semaphore_scope`**: Scope for the semaphore: `class`, `instance`, `global`, or `multiprocess`
+- **`semaphore_timeout`**: Maximum time to wait for a semaphore slot before proceeding or failing. If omitted: `timeout * max(1, semaphore_limit - 1)` when `timeout` is set, otherwise wait forever
- **`semaphore_lax`**: Continue anyway if semaphore fails to be acquired in within the given time
-- **`semaphore_name`**: Unique semaphore name to allow sharing a semaphore between functions
+- **`semaphore_name`**: Unique semaphore name (string) or callable getter that receives function args and returns a name
#### Semaphore Options
@@ -881,7 +1217,7 @@ class MyService:
# Per-instance semaphore - each instance gets its own limit
class MyService:
- @retry(semaphore_limit=1, semaphore_scope='self')
+ @retry(semaphore_limit=1, semaphore_scope='instance')
async def instance_limited_handler(self, event): ...
# Cross-process semaphore - all processes share one limit
@@ -902,15 +1238,15 @@ class DatabaseEvent(BaseEvent):
class DatabaseService:
@retry(
- wait=1,
- retries=5,
+ retry_after=1,
+ max_attempts=5,
timeout=10,
semaphore_limit=10, # Max 10 concurrent DB operations
semaphore_scope='class', # Shared across all instances
semaphore_timeout=30, # Wait up to 30s for semaphore
semaphore_lax=False, # Fail if can't acquire semaphore
- backoff_factor=2.0, # Exponential backoff: 1s, 2s, 4s, 8s, 16s
- retry_on=(ConnectionError, TimeoutError)
+ retry_backoff_factor=2.0, # Exponential backoff: 1s, 2s, 4s, 8s, 16s
+ retry_on_errors=[ConnectionError, TimeoutError],
)
async def execute_query(self, event: DatabaseEvent):
# Automatically retries on connection failures
@@ -925,6 +1261,22 @@ bus.on(DatabaseEvent, db_service.execute_query)
+---
+
+
+
+## π Performance (Python)
+
+```bash
+uv run tests/performance_runtime.py # run the performance test suite in python
+```
+
+| Runtime | 1 bus x 50k events x 1 handler | 500 busses x 100 events x 1 handler | 1 bus x 1 event x 50k parallel handlers | 1 bus x 50k events x 50k one-off handlers | Worst case (N busses x N events x N handlers) |
+| ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
+| Python | `0.179ms/event`, `0.235kb/event` | `0.191ms/event`, `0.191kb/event` | `0.035ms/handler`, `8.164kb/handler` | `0.255ms/event`, `0.185kb/event` | `0.351ms/event`, `5.867kb/event` |
+
+
+
---
---
@@ -932,10 +1284,10 @@ bus.on(DatabaseEvent, db_service.execute_query)
## πΎ Development
-Set up the development environment using `uv`:
+Set up the python development environment using `uv`:
```bash
-git clone https://github.com/browser-use/bubus && cd bubus
+git clone https://github.com/pirate/bbus && cd bbus
# Create virtual environment with Python 3.12
uv venv --python 3.12
@@ -960,12 +1312,21 @@ uv run pytest -vxs --full-trace tests/
# Run specific test file
uv run pytest tests/test_eventbus.py
+
+# Run Python perf suite
+uv run tests/performance_runtime.py
+
+# Run the entire lint+test+examples+perf suite for both python and ts
+./test.sh
```
+> For Bubus-TS development see the `bubus-ts/README.md` `# Development` section.
+
## π Inspiration
- https://www.cosmicpython.com/book/chapter_08_events_and_message_bus.html#message_bus_diagram βοΈ
- https://developer.mozilla.org/en-US/docs/Web/API/EventTarget βοΈ
+- https://github.com/sindresorhus/emittery βοΈ (equivalent for JS), https://github.com/EventEmitter2/EventEmitter2, https://github.com/vitaly-t/sub-events
- https://github.com/pytest-dev/pluggy βοΈ
- https://github.com/teamhide/fastapi-event βοΈ
- https://github.com/ethereum/lahja βοΈ
@@ -985,11 +1346,11 @@ uv run pytest tests/test_eventbus.py
---
-> [π§ DeepWiki Docs](https://deepwiki.com/browser-use/bubus)
+> [π§ DeepWiki Docs](https://deepwiki.com/pirate/bbus)
> 
## ποΈ License
-This project is licensed under the MIT License. For more information, see the main browser-use repository: https://github.com/browser-use/browser-use
+This project is licensed under the MIT License.
diff --git a/TODO_python.md b/TODO_python.md
new file mode 100644
index 0000000..34f238c
--- /dev/null
+++ b/TODO_python.md
@@ -0,0 +1,285 @@
+# TODO: Python Unified Runtime Refactor (Middleware + Context-Manager Style)
+
+This plan merges:
+- current middleware runtime behavior
+- context-manager refactor goals from `TODO_context_manager_refactor.md`
+
+into one ordered Python implementation plan.
+This file supersedes `TODO_context_manager_refactor.md` for Python implementation planning.
+
+## Semantic Decisions (Already Fixed)
+
+1. `event_timeout` is a hard cap across all handlers for an event.
+2. `event_handler_timeout` defaults to `event_timeout` when unset.
+3. Queue-jump path keeps lock-object checks and re-entrant lock correctness.
+4. `first()` winner in Python is first non-`None`, non-error, non-`BaseEvent` result.
+5. Core handler error taxonomy must include:
+- `EventHandlerCancelledError`
+- `EventHandlerResultSchemaError`
+- `EventHandlerTimeoutError`
+- `EventHandlerAbortedError`
+
+## Constraints (Locked In)
+
+1. Middleware throw behavior is undefined (no suppression/guarding required).
+2. Hook re-fires are acceptable (simple implementation preferred).
+3. Keep queue-jump semantics and lock correctness.
+4. Preserve current behavior unless explicitly listed as a semantic change.
+5. Docs updates are out of scope for this pass.
+
+## Target Architecture
+
+### Context-manager style
+
+Explicit scope boundaries for lock/context/timeout/slow-monitor/error handling:
+
+- `event_lock(...)`
+- `handler_lock(...)`
+- `handler_dispatch_context(...)`
+- `handler_execution_context(event, handler_id)` (single context manager replacing enter/exit token helpers)
+- timeout/slow/error wrapper helpers used consistently by both event and handler execution paths
+
+### Middleware orchestration
+
+- Middleware execution remains centralized in `EventBus` internal notifier methods.
+- Hook calls remain sequential in middleware registration order.
+- No middleware exception handling contract is added.
+
+## Verified Runtime Trace (Implementation Anchors)
+
+Use these concrete call paths to constrain refactor changes:
+
+1. Dispatch + context capture:
+- `EventBus.emit(...)` applies defaults and captures dispatch context with `contextvars.copy_context()`.
+- This is the correct anchor for enqueue-time lifecycle behavior (`event: pending` emission remains bus-owned).
+
+2. Runloop + event processing:
+- `_run_loop_weak(...)` dequeues and branches serial/parallel based on `locks.get_lock_for_event(...)`.
+- `step(event=...)` acquires event lock via `locks.lock_for_event(...)` then calls `handle_event(...)`.
+- `handle_event(...)` is the event-level orchestration seam for slow monitor, handler execution, and completion propagation.
+
+3. Handler execution stack:
+- `_execute_handlers(...)` creates pending results and applies first/all completion policy.
+- `execute_handler(...)` acquires `locks.lock_for_event_handler(...)` then delegates to `EventResult.execute(...)`.
+- `EventResult.execute(...)` is already the core timeout/slow/error normalization path.
+
+4. Queue-jump path:
+- Await-inside-handler flows through `_wait_for_completion_inside_handler()` to `_process_self_on_all_buses()`.
+- Slow-path logic depends on lock-object/in-flight checks (`get_lock_for_event(...)` + processing id tracking), so refactor must preserve lock-aware checks instead of enum-only shortcuts.
+
+5. Context boundary anchors:
+- ContextVar state currently enters/exits through `_enter_handler_execution_context(...)` and `_exit_handler_execution_context(...)`.
+- Lock-manager dispatch context helper (`lock_context_for_current_handler(...)`) must remain coupled to handler execution context behavior after consolidation.
+
+### Compact Line-Anchored Call Graph
+
+- `EventBus.emit(...)` defaults/context capture/queue insert: `bubus/event_bus.py:930`.
+- `_run_loop_weak(...)` dequeue + serial/parallel branch via lock query: `bubus/event_bus.py:1587`.
+- `step(...)` event lock + handle dispatch seam: `bubus/event_bus.py:1795`.
+- `handle_event(...)` event-level orchestration seam: `bubus/event_bus.py:1887`.
+- `_execute_handlers(...)` pending result creation + first/all logic: `bubus/event_bus.py:2066`.
+- `execute_handler(...)` handler lock + result execute seam: `bubus/event_bus.py:2194`.
+- `EventResult.execute(...)` handler timeout/slow/error core: `bubus/base_event.py:458`.
+- Queue-jump path: `_process_self_on_all_buses(...)`: `bubus/base_event.py:779`.
+- Current ContextVar enter/exit helpers:
+ - `_enter_handler_execution_context(...)`: `bubus/event_bus.py:2025`.
+ - `_exit_handler_execution_context(...)`: `bubus/event_bus.py:2033`.
+
+### Scope-Nesting Rationale
+
+- Preferred layering for event/handler scopes is:
+ - lock -> error save/normalization -> hard timeout -> slow monitor -> work.
+- Timeout should wrap slow monitor so timeout cancellation tears down monitor task cleanly.
+- Error wrapper should remain outermost around timeout/monitor/work so timeout, cancellation, and generic handler failures are normalized consistently.
+
+## Sequencing Rationale (Why This Order)
+
+1. Middleware constructor normalization lands first to fix API/runtime mismatch without touching lock/timeout semantics.
+2. Lock-manager protocol seam lands before behavior changes, with compatibility aliases kept, so call sites can migrate incrementally.
+3. Handler execution context consolidation happens before timeout hard-cap work to avoid changing context and timeout behavior in one step.
+4. Event-level hard timeout is introduced only after wrappers/seams exist, reducing risk around queue-jump and cancellation propagation.
+5. Queue-jump safety verification is a dedicated phase after seam extraction to catch regressions in re-entrancy and release/reacquire behavior.
+
+## Ordered Implementation Plan
+
+## Phase 0: Baseline + invariants
+
+1. Snapshot current behavior with existing tests:
+- `tests/test_event_handler_concurrency.py`
+- `tests/test_event_handler_completion.py`
+- `tests/test_handler_timeout.py`
+- `tests/test_event_timeout_defaults.py`
+- `tests/test_context_propagation.py`
+- `tests/test_parent_event_tracking.py`
+- `tests/test_eventbus.py` (middleware sections)
+
+2. Capture explicit invariants before refactor:
+- queue-jump still respects lock/re-entrancy behavior
+- handler timeout resolution precedence remains unchanged unless explicitly changed below
+- first-winner semantics and handler error taxonomy remain stable across refactor phases
+
+## Phase 1: Middleware normalization + constructor parity cleanup
+
+Files:
+- `bubus/event_bus.py`
+- `bubus/middlewares.py` (typing only if needed)
+
+Changes:
+1. Normalize `middlewares` input in constructor:
+- instance entries pass through
+- class entries auto-instantiate (`MiddlewareClass()`)
+2. Keep internal storage as `list[EventBusMiddleware]`.
+3. Internal hook loop order remains registration order.
+4. No middleware exception suppression wrappers are added.
+
+Note:
+- This resolves existing doc/runtime mismatch where docs mention class-or-instance inputs.
+
+## Phase 2: LockManager protocol seam (compat first)
+
+Files:
+- `bubus/lock_manager.py`
+- `bubus/event_bus.py`
+- `bubus/base_event.py` (call-site rename updates)
+
+Changes:
+1. Introduce protocol/interface seam (`LockManagerProtocol`) exposing:
+- `event_lock(...)`
+- `handler_lock(...)`
+- `handler_dispatch_context(...)`
+2. Keep compatibility aliases:
+- `lock_for_event` -> `event_lock`
+- `lock_for_event_handler` -> `handler_lock`
+- `lock_context_for_current_handler` -> `handler_dispatch_context`
+3. Keep lock identity query path available (`get_lock_for_event(...)`) for queue-jump slow-path checks.
+4. Keep compatibility aliases until both runloop path and queue-jump path are migrated and tested.
+
+## Phase 3: Handler execution context manager
+
+Files:
+- `bubus/event_bus.py`
+
+Changes:
+1. Replace `_enter_handler_execution_context(...)` and `_exit_handler_execution_context(...)`
+with one context manager:
+- `handler_execution_context(event, handler_id)`
+2. Keep exact ContextVar semantics.
+3. Include lock dispatch context mark in this manager so lock/context behavior remains coupled and explicit.
+4. Keep behavior identical for both async and sync handler invocation paths.
+
+## Phase 4: Timeout/slow/error wrapper extraction
+
+Files:
+- `bubus/event_bus.py`
+- `bubus/base_event.py`
+- `bubus/helpers.py`
+- `bubus/event_handler.py` (define shared handler error classes here)
+
+Changes:
+1. Extract reusable wrappers/helpers:
+- `_timeout_scope(timeout)`
+- `_slow_monitor(...)`
+- `_save_handler_errors(...)` (or equivalent)
+2. Apply wrappers in `EventResult.execute()` without changing observed handler-level behavior first.
+3. Then apply same wrapper style to event-level execution path.
+4. Introduce/normalize shared handler error classes for parity:
+- `EventHandlerCancelledError`
+- `EventHandlerResultSchemaError`
+- `EventHandlerTimeoutError`
+- `EventHandlerAbortedError`
+5. Ensure terminal handler paths map to these classes consistently.
+
+## Phase 5: Event-level hard timeout semantic change
+
+Files:
+- `bubus/event_bus.py`
+- `bubus/base_event.py`
+
+Semantic decision implemented here:
+1. `event_timeout` becomes a hard cap across all handlers for an event.
+2. `event_handler_timeout` defaults to `event_timeout` when unset (retain this resolution rule).
+
+Implementation steps:
+1. Wrap event processing scope (`handle_event` path) in event-level timeout.
+2. Ensure timeout finalization preserves existing cancellation cascade expectations.
+3. Keep middleware lifecycle notifications monotonic through timeout transitions.
+4. Ensure `event_handler_timeout` fallback remains `event_timeout` after refactor.
+5. Keep no-wait cancellation semantics for timed-out in-flight child processing consistent.
+
+## Phase 6: Queue-jump lock safety after seam extraction
+
+Files:
+- `bubus/base_event.py`
+- `bubus/event_bus.py`
+- `bubus/lock_manager.py`
+
+Checks/changes:
+1. Keep lock-object checks in queue-jump slow-path (do not reduce to enum-only checks).
+2. Preserve `ReentrantLock` depth semantics across copied context.
+3. Keep cross-bus queue-jump behavior unchanged.
+4. Ensure no regression in release/reacquire behavior during nested execution.
+
+## Phase 7: `first()` and taxonomy alignment pass
+
+Files:
+- `bubus/event_bus.py`
+- `bubus/base_event.py`
+- `tests/test_event_handler_completion.py`
+- `tests/test_eventbus.py`
+
+Changes:
+1. Re-assert/lock winner criteria:
+- non-error
+- non-`BaseEvent`
+- non-`None`
+2. Ensure cancellation of non-winners remains consistent with queue-jump and timeout behaviors.
+3. Add/adjust tests explicitly for `None` non-winner behavior.
+4. Add/adjust tests for taxonomy classes on timeout/cancel/abort/result-schema paths.
+
+## Phase 8: Middleware lifecycle alignment pass
+
+Files:
+- `bubus/event_bus.py`
+- `bubus/middlewares.py` (only if signatures/typing need sync)
+- `tests/test_eventbus.py`
+
+Changes:
+1. Confirm event + result + handler-change hooks are emitted from centralized notifiers.
+2. Confirm ordering remains:
+- event: `pending -> started -> completed`
+- result: `pending -> started -> completed`
+3. Keep simple behavior:
+- re-fires allowed
+- middleware throws undefined behavior
+
+## Phase 9: Verification matrix
+
+Run:
+```bash
+python -m pytest tests/test_eventbus.py -xvs
+python -m pytest tests/test_event_handler_concurrency.py -xvs
+python -m pytest tests/test_event_handler_completion.py -xvs
+python -m pytest tests/test_handler_timeout.py -xvs
+python -m pytest tests/test_event_timeout_defaults.py -xvs
+python -m pytest tests/test_events_suck.py -xvs
+python -m pytest tests/test_context_propagation.py -xvs
+python -m pytest tests/test_parent_event_tracking.py -xvs
+python -m pytest tests/test_stress_20k_events.py -xvs
+```
+
+Add/adjust tests for:
+1. middleware constructor auto-init from class entries
+2. sequential hook order
+3. queue-jump correctness under new context-manager seams
+4. event hard-timeout behavior
+5. taxonomy assertions for cancelled/aborted/timeout/result-schema
+6. `first()` winner filtering behavior (`None` is non-winner)
+
+## Done Criteria
+
+1. Context-manager architecture is explicit and unified.
+2. Lock manager has a stable interface seam with compatibility aliases.
+3. Middleware path is normalized and centralized.
+4. Event hard-timeout semantics are implemented and tested.
+5. Queue-jump/lock correctness is preserved.
+6. `first()` and taxonomy semantics match locked decisions.
diff --git a/TODO_ts.md b/TODO_ts.md
new file mode 100644
index 0000000..52d7377
--- /dev/null
+++ b/TODO_ts.md
@@ -0,0 +1,314 @@
+# TODO: TypeScript Unified Runtime Refactor (Middleware + Context-Manager Style)
+
+This plan merges:
+- middleware support work (`TODO_middleware.md`)
+- context/lock/timeout architecture work (`TODO_context_manager_refactor.md`)
+
+into one ordered TS implementation plan.
+This file supersedes those two TODOs for TypeScript implementation planning.
+
+## Semantic Decisions (Already Fixed)
+
+1. `event_timeout` is a hard cap across all handlers for an event.
+2. `event_handler_timeout` defaults to `event_timeout` when unset.
+3. Queue-jump path remains lock-driven; no enum-only shortcut logic.
+4. `first()` winner in TS is first non-`undefined`, non-error, non-`BaseEvent` result (`null` is valid).
+5. Core handler error taxonomy must include:
+- `EventHandlerCancelledError`
+- `EventHandlerResultSchemaError`
+- `EventHandlerTimeoutError`
+- `EventHandlerAbortedError`
+6. Event status lifecycle remains `pending | started | completed` (errors live on `EventResult`, not `Event`).
+
+## Constraints (Locked In)
+
+1. `middlewares` must accept either classes (constructors) or instances.
+2. Class entries in `middlewares` are auto-instantiated.
+3. Hook execution is sequential in registration order.
+4. Middleware throw behavior is undefined (no suppression/guarding required).
+5. Hook re-fires are acceptable (keep implementation simple).
+6. Per-bus hook emission is correct (different buses can have different middleware stacks).
+7. `markCancelled` must never produce `completed -> started`.
+8. Docs updates are out of scope for this pass.
+
+## Target Architecture
+
+### Core execution ownership
+
+- `EventBus` owns orchestration.
+- `BaseEvent`/`EventResult` only perform local state mutation + upward notifications.
+- Middleware invocation is centralized in `EventBus`.
+
+### Context-manager style in TS
+
+Use scoped wrapper functions (functional context-manager style):
+
+- `withEventLock(event, fn)`
+- `withHandlerLock(event, result, fn)`
+- `withHandlerDispatchContext(event, fn)` (or equivalent wrapper around existing ALS restore)
+- `withTimeout(timeoutSecs, fn)` for event-level and handler-level wrapping
+- `withSlowMonitor(start/stop monitor, fn)`
+
+These wrappers should compose, and keep the current semaphore/queue-jump behavior.
+
+## Verified Runtime Trace (Implementation Anchors)
+
+Use these concrete call paths to keep changes behavior-safe while refactoring:
+
+1. Dispatch + context capture:
+- `EventBus.dispatch(...)` captures async context via `captureAsyncContext()`.
+- Queue insert + runloop kick happen here; this is the correct `event: pending` emission anchor.
+
+2. Runloop + event processing:
+- `EventBus.runloop(...)` dequeues and resolves semaphore policy through `LockManager.getSemaphoreForEvent(...)`.
+- `EventBus.processEvent(...)` is the bus-local orchestration seam for `event: started/completed`, pending result creation, and future event-level hard timeout wrapper.
+
+3. Handler execution stack:
+- `BaseEvent.createPendingHandlerResults(...)` creates result records; this is the correct `event_result: pending` emission anchor.
+- `BaseEvent.processEvent(...)` drives all/first semantics.
+- `EventResult.runHandler(...)` already centralizes handler lock acquisition, timeout race, slow warnings, abort signaling, and terminal result marking.
+
+4. Queue-jump/cross-bus path:
+- `BaseEvent.done()/immediate()` route through `EventBus.processEventImmediately(...)`.
+- `EventBus.processEventImmediatelyAcrossBuses(...)` and `HandlerLock.runQueueJump(...)` coordinate pause/release and semaphore bypass where required.
+- Any lock abstraction must preserve this exact lifecycle.
+
+5. Status mutation anchors:
+- Event transitions live in `BaseEvent.markStarted/markCompleted/markCancelled`.
+- Result transitions live in `EventResult.markStarted/markCompleted/markError`.
+- Middleware integration should hook at these transition anchors via bus notifiers, not by duplicating status logic in middleware code.
+
+### Compact Line-Anchored Call Graph
+
+- `EventBus.dispatch(...)` enqueue/context capture: `bubus-ts/src/event_bus.ts:450`, `bubus-ts/src/event_bus.ts:459`.
+- `EventBus.runloop(...)` dequeue/event semaphore path: `bubus-ts/src/event_bus.ts:865`.
+- `EventBus.processEvent(...)` bus-local orchestration seam: `bubus-ts/src/event_bus.ts:687`.
+- `BaseEvent.createPendingHandlerResults(...)` pending result materialization: `bubus-ts/src/base_event.ts:373`.
+- `BaseEvent.processEvent(...)` all/first fanout and completion policy: `bubus-ts/src/base_event.ts:394`.
+- `EventResult.runHandler(...)` handler timeout/slow/error core: `bubus-ts/src/event_result.ts:245`.
+- Queue-jump entry/coordination:
+ - `BaseEvent.done()/immediate()`: `bubus-ts/src/base_event.ts:655`.
+ - `EventBus.processEventImmediatelyAcrossBuses(...)`: `bubus-ts/src/event_bus.ts:787`.
+ - `HandlerLock.runQueueJump(...)`: `bubus-ts/src/lock_manager.ts:131`.
+
+### Scope-Nesting Rationale
+
+- Preferred execution layering for both event and handler scopes is:
+ - lock -> error save/normalization -> hard timeout -> slow monitor -> body.
+- Timeout scope wraps slow monitor so timeout cancellation naturally tears down monitoring.
+- Error wrapper stays outside timeout/monitor scopes so timeout and abort errors are normalized consistently into agreed taxonomy.
+
+## Sequencing Rationale (Why This Order)
+
+1. Middleware surface and hook runner land before lock/timeout refactors so lifecycle observability is stable while internals move.
+2. Context-manager wrappers are introduced as thin seams first (no semantic changes), then hot paths are migrated behind them.
+3. Event-level hard timeout is delayed until wrappers exist so timeout finalization can reuse lock/abort/release mechanics instead of duplicating them.
+4. `first()` and taxonomy alignment are late-phase normalization work once timeout + lifecycle behavior is stable.
+5. Built-in middleware parity is explicitly follow-on to avoid coupling core runtime correctness with additional feature surface.
+
+## Ordered Implementation Plan
+
+## Phase 0: Baseline + safety net
+
+1. Run and snapshot current TS tests that cover locking/timeout/first/context:
+- `bubus-ts/tests/locking.test.ts`
+- `bubus-ts/tests/timeout.test.ts`
+- `bubus-ts/tests/first.test.ts`
+- `bubus-ts/tests/context_propagation.test.ts`
+- `bubus-ts/tests/forwarding.test.ts`
+
+2. Add focused middleware test file skeleton (failing tests allowed initially):
+- `bubus-ts/tests/middleware.test.ts`
+
+## Phase 1: Public middleware surface
+
+Files:
+- `bubus-ts/src/middlewares.ts` (new)
+- `bubus-ts/src/event_bus.ts`
+- `bubus-ts/src/index.ts`
+- `bubus-ts/src/types.ts` (only if minor export/type wiring needed)
+
+Changes:
+1. Add `EventBusMiddleware` interface.
+2. Add `EventBusMiddlewareCtor = new () => EventBusMiddleware`.
+3. Re-export `EventStatus` from existing `types.ts` (do not duplicate type definition).
+4. Extend `EventBusOptions` with:
+- `middlewares?: Array<EventBusMiddleware | EventBusMiddlewareCtor>`
+5. Constructor normalization:
+- class -> `new Class()`
+- instance -> use directly
+6. Store normalized middlewares on bus runtime state.
+7. Keep middleware types runtime-only (no behavior in this phase).
+
+## Phase 2: Internal middleware hook runner
+
+Files:
+- `bubus-ts/src/event_bus.ts`
+
+Changes:
+1. Add internal notifiers:
+- `_on_event_change(event, status)`
+- `_on_event_result_change(event, result, status)`
+- `_on_handler_change(handler, registered)`
+2. Add `runMiddlewareHook(...)`:
+- iterate middlewares in order
+- `await` each hook sequentially
+- do not catch middleware errors
+3. Add `scheduleMicrotask(fn)` helper:
+- `queueMicrotask` if available
+- fallback `Promise.resolve().then(...)`
+4. Use `scheduleMicrotask` in `startRunloop` for runtime portability.
+5. Keep middleware scheduling and execution outside lock acquisition where possible.
+
+## Phase 3: Hook integration into lifecycle (simple semantics)
+
+Files:
+- `bubus-ts/src/event_bus.ts`
+- `bubus-ts/src/base_event.ts`
+- `bubus-ts/src/event_result.ts`
+
+Changes:
+1. Event hooks:
+- `pending`: emit from `dispatch()` after enqueue, before runloop kick.
+- `started`: emit when event transitions pending -> started.
+- `completed`: emit when event transitions to completed.
+
+2. Event-result hooks:
+- `pending`: emit right after `createPendingHandlerResults(...)` in bus orchestration.
+- `started`: emit on `EventResult.markStarted()` transition.
+- `completed`: emit on `EventResult.markCompleted()` and `EventResult.markError()`.
+
+3. Handler registration hooks:
+- emit `on_handler_change(..., true)` after `on(...)`.
+- emit `on_handler_change(..., false)` after removal in `off(...)`.
+- use fire-and-forget microtask scheduling.
+
+Notes:
+- Re-fires are acceptable; do not add dedupe complexity.
+- Keep ordering monotonic per call site.
+- For sync mutation paths (`markCancelled`, `markCompleted`, `markError`) emit async hooks via microtask helper to avoid changing sync signatures.
+
+## Phase 4: Context-manager style execution seams
+
+Files:
+- `bubus-ts/src/lock_manager.ts`
+- `bubus-ts/src/event_bus.ts`
+- `bubus-ts/src/event_result.ts`
+- `bubus-ts/src/async_context.ts` (if helper extraction needed)
+
+Changes:
+1. Introduce lock-policy wrapper methods around existing semaphores/locks:
+- `withEventLock(...)`
+- `withHandlerLock(...)`
+2. Introduce one composable dispatch-context wrapper around existing ALS restore path.
+3. Migrate hot paths to wrappers without changing behavior:
+- `EventBus.processEvent(...)`
+- `EventResult.runHandler()`
+- queue-jump path (`processEventImmediately...`) must keep current pause/release mechanics.
+4. Keep identity/state access for lock checks used by queue-jump slow/active paths.
+
+## Phase 5: Event-level hard timeout integration
+
+Files:
+- `bubus-ts/src/event_bus.ts`
+- `bubus-ts/src/base_event.ts`
+- `bubus-ts/src/event_result.ts`
+
+Changes:
+1. Add event-level hard timeout wrapper in `EventBus.processEvent(...)` around bus-local handler execution scope.
+2. Ensure middleware latency does not weaken timeout semantics:
+- timeout applies to handler execution scope, not middleware hook runtime.
+3. Timeout finalizer rules (bus-local):
+- started handlers -> `EventHandlerAbortedError` + abort signal
+- pending handlers -> `EventHandlerCancelledError`
+4. Release active execution state in finalizer:
+- `_lock?.exitHandlerRun()`
+- `releaseQueueJumpPauses()`
+- `signalAbort(aborted_error)` for started handlers
+5. Do not wait for in-flight promises after timeout finalization (detach/suppress late completions).
+6. Keep terminal guards so late completions cannot overwrite finalized status.
+7. Ensure hook ordering remains monotonic (`started -> completed`, never reverse).
+8. Reuse descendant cancellation logic for pending child/downstream work.
+9. Keep queue-jump lock safety and semaphore bypass behavior unchanged.
+
+## Phase 6: `first()` winner semantics alignment (TS side)
+
+Files:
+- `bubus-ts/src/base_event.ts`
+- `bubus-ts/src/event_result.ts`
+- `bubus-ts/tests/first.test.ts`
+
+Rules:
+1. Winner must be:
+- non-error
+- non-`BaseEvent`
+- non-`undefined`
+2. `null` is valid winner.
+3. Cancellation of non-winners stays bus-local and consistent with existing behavior.
+4. Align tests to explicitly assert `null` can win while `undefined` cannot.
+
+## Phase 7: Error taxonomy alignment (TS side)
+
+Files:
+- `bubus-ts/src/event_handler.ts`
+- `bubus-ts/src/event_result.ts`
+- `bubus-ts/tests/error_handling.test.ts`
+- `bubus-ts/tests/timeout.test.ts`
+
+Changes:
+1. Ensure all timeout/cancel/abort/result-schema terminal paths use the four agreed classes.
+2. Keep retry-internal error types allowed, but normalize surfaced handler terminal states to agreed taxonomy.
+3. Validate cause/event_result metadata remains attached for middleware/diagnostics.
+
+## Phase 8: Verification + cleanup
+
+1. Run full targeted suite:
+```bash
+cd bubus-ts
+pnpm test tests/middleware.test.ts
+pnpm test tests/locking.test.ts
+pnpm test tests/timeout.test.ts
+pnpm test tests/first.test.ts
+pnpm test tests/error_handling.test.ts
+pnpm test tests/context_propagation.test.ts
+pnpm test tests/forwarding.test.ts
+```
+
+2. Add/adjust tests for:
+- ctor + instance middleware auto-init
+- hook sequencing
+- per-bus hooks on forwarded events
+- no-handler event lifecycle hooks
+- cancellation path ordering (no `completed -> started`)
+- hard event-timeout immediate finalization (no wait on inflight handlers)
+- taxonomy assertions for cancelled/aborted/timeout/result-schema
+
+3. Remove temporary compatibility shims if any were introduced during migration.
+
+## Phase 9: Built-in middleware parity (follow-on)
+
+Files:
+- `bubus-ts/src/middlewares.ts`
+- `bubus-ts/src/index.ts`
+- `bubus-ts/tests/*` (targeted parity tests)
+
+Add TS equivalents of:
+- `AutoErrorEventMiddleware`
+- `AutoReturnEventMiddleware`
+- `AutoHandlerChangeEventMiddleware`
+- `LoggerEventBusMiddleware`
+- `WALEventBusMiddleware`
+- `SQLiteHistoryMirrorMiddleware`
+
+Important UI compatibility note:
+- `SQLiteHistoryMirrorMiddleware` table/schema writes must stay compatible with reads in `ui/db.py`.
+
+## Done Criteria
+
+1. Middleware API is public and stable.
+2. Core lifecycle hooks are emitted in expected order.
+3. Lock/context/timeout wrappers are centralized and composable.
+4. Queue-jump behavior remains correct.
+5. `first()` winner semantics and error taxonomy match locked decisions.
+6. Core unified-runtime targeted tests pass.
+7. Built-in middleware parity is either implemented (Phase 9 done) or explicitly deferred as follow-on.
diff --git a/bubus-ts/README.md b/bubus-ts/README.md
new file mode 100644
index 0000000..6bcdf26
--- /dev/null
+++ b/bubus-ts/README.md
@@ -0,0 +1,838 @@
+# `bubus`: π’ Production-ready multi-language event bus
+
+
+
+[](https://deepwiki.com/pirate/bbus) [](https://pypi.org/project/bubus/) [](https://github.com/pirate/bbus) 
+
+[](https://deepwiki.com/pirate/bbus/3-typescript-implementation) [](https://www.npmjs.com/package/bubus)
+
+Bubus is an in-memory event bus library for async Python and TS (node/bun/deno/browser).
+
+It's designed for quickly building resilient, predictable, complex event-driven apps.
+
+It "just works" with an intuitive, but powerful event JSON format + dispatch API that's consistent across both languages and scales consistently from one event to millions (~0.2ms/event):
+
+```python
+bus.on(SomeEvent, some_function)
+bus.emit(SomeEvent(some_data=132))
+```
+
+It's async native, has proper automatic nested event tracking, and powerful concurrency control options. The API is inspired by `EventEmitter` or [`emittery`](https://github.com/sindresorhus/emittery) in JS, but it takes it a step further:
+
+- nice Zod / Pydantic schemas for events that can be exchanged between both languages
+- automatic UUIDv7s and monotonic nanosecond timestamps for ordering events globally
+- built-in locking options to force strict global FIFO processing or fully parallel processing
+
+---
+
+βΎοΈ It's inspired by the simplicity of async and events in `JS` but with baked-in features that allow you to eliminate most of the tedious repetitive complexity in event-driven codebases:
+
+- correct timeout enforcement across multiple levels of events, if a parent times out it correctly aborts all child event processing
+- ability to strongly type hint and enforce the return type of event handlers at compile-time
+- ability to queue events on the bus, or inline await them for immediate execution like a normal function call
+- handles ~5,000 events/sec/core in both languages, with ~2kb/event RAM consumed per event during active processing
+
+
+
+## π’ Quickstart
+
+```bash
+npm install bubus
+```
+
+```ts
+import { BaseEvent, EventBus } from 'bubus'
+import { z } from 'zod'
+
+const CreateUserEvent = BaseEvent.extend('CreateUserEvent', {
+ email: z.string(),
+ event_result_type: z.object({ user_id: z.string() }),
+})
+
+const bus = new EventBus('MyAuthEventBus')
+
+bus.on(CreateUserEvent, async (event) => {
+ const user = await yourCreateUserLogic(event.email)
+ return { user_id: user.id }
+})
+
+const event = bus.emit(CreateUserEvent({ email: 'someuser@example.com' }))
+await event.done()
+console.log(event.event_result) // { user_id: 'some-user-uuid' }
+```
+
+
+
+---
+
+
+
+## β¨ Features
+
+The features offered in TS are broadly similar to the ones offered in the python library.
+
+- Typed events with Zod schemas (cross-compatible with Pydantic events from python library)
+- FIFO event queueing with configurable concurrency
+- Nested event support with automatic parent/child tracking
+- Cross-bus forwarding with loop prevention
+- Handler result tracking + validation + timeout enforcement
+- History retention controls (`max_history_size`) for memory bounds
+- Optional `@retry` decorator for easy management of per-handler retries, timeouts, and semaphore-limited execution
+
+See the [Python README](../README.md) for more details.
+
+
+
+---
+
+
+
+## π API Documentation
+
+### `EventBus`
+
+The main bus class that registers handlers, schedules events, and tracks results.
+
+Constructor:
+
+```ts
+new EventBus(name?: string, options?: {
+ id?: string
+ max_history_size?: number | null
+ event_concurrency?: 'global-serial' | 'bus-serial' | 'parallel' | null
+ event_timeout?: number | null
+ event_slow_timeout?: number | null
+ event_handler_concurrency?: 'serial' | 'parallel' | null
+ event_handler_completion?: 'all' | 'first'
+ event_handler_slow_timeout?: number | null
+ event_handler_detect_file_paths?: boolean
+})
+```
+
+#### Constructor options
+
+| Option | Type | Default | Purpose |
+| --------------------------------- | ------------------------------------------------------- | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `id` | `string` | `uuidv7()` | Override bus UUID (mostly for serialization/tests). |
+| `max_history_size` | `number \| null` | `100` | Max events kept in `event_history`; `null` = unbounded; `0` = keep only in-flight events and drop completed events immediately. |
+| `max_history_drop` | `boolean` | `false` | If `true`, when history is full drop oldest history entries (including uncompleted if needed). If `false`, reject new dispatches when history reaches `max_history_size`. |
+| `event_concurrency` | `'global-serial' \| 'bus-serial' \| 'parallel' \| null` | `'bus-serial'` | Event-level scheduling policy. |
+| `event_handler_concurrency` | `'serial' \| 'parallel' \| null` | `'serial'` | Per-event handler scheduling policy. |
+| `event_handler_completion` | `'all' \| 'first'` | `'all'` | Event completion mode if event does not override it. |
+| `event_timeout` | `number \| null` | `60` | Default per-handler timeout budget in seconds (unless overridden). |
+| `event_handler_slow_timeout` | `number \| null` | `30` | Slow handler warning threshold (seconds). |
+| `event_slow_timeout` | `number \| null` | `300` | Slow event warning threshold (seconds). |
+| `event_handler_detect_file_paths` | `boolean` | `true` | Capture source file:line for handlers (slower, better logs). |
+
+#### Runtime state properties
+
+- `id: string`
+- `name: string`
+- `label: string` (`${name}#${id.slice(-4)}`)
+- `handlers: Map`
+- `handlers_by_key: Map`
+- `event_history: Map`
+- `pending_event_queue: BaseEvent[]`
+- `in_flight_event_ids: Set`
+- `locks: LockManager`
+
+#### `on()`
+
+```ts
+on(
+ event_pattern: string | '*' | EventClass,
+ handler: EventHandlerFunction,
+ options?: Partial
+): EventHandler
+```
+
+Use during startup/composition to register handlers.
+
+Advanced `options` fields, these can be used to override defaults per-handler if needed:
+
+- `handler_timeout?: number | null` hard delay before handler execution is aborted with a `HandlerTimeoutError`
+- `handler_slow_timeout?: number | null` delay before emitting a slow handler warning log line
+- `handler_name?: string` optional name to use instead of `anonymous` if handler is an unnamed arrow function
+- `handler_file_path?: string | null` optional path/to/source/file.js:lineno where the handler is defined, used for logging only
+- `id?: string` unique UUID for the handler (normally a hash of bus_id + event_pattern + handler_name + handler_registered_at)
+
+Notes:
+
+- Prefer class/factory keys (`bus.on(MyEvent, handler)`) for typed payload/result inference.
+- String and `'*'` matching are supported (`bus.on('MyEvent', ...)`, `bus.on('*', ...)`).
+- Returns an `EventHandler` object you can later pass to `off()` to de-register the handler if needed.
+
+#### `off()`
+
+```ts
+off(
+ event_pattern: EventPattern | '*',
+ handler?: EventHandlerFunction | string | EventHandler
+): void
+```
+
+Use when tearing down subscriptions (tests, plugin unload, hot-reload).
+
+- Omit `handler` to remove all handlers for `event_pattern`.
+- Pass handler function reference to remove one by function identity.
+- Pass handler id (`string`) or `EventHandler` object to remove by id.
+- use `bus.off('*')` to remove _all_ registered handlers from the bus
+
+#### `dispatch()` / `emit()`
+
+```ts
+dispatch(event: T): T
+emit(event: T): T
+```
+
+`emit()` is just an alias of `dispatch()`.
+
+Behavior notes:
+
+- Per-event configuration options like `event_timeout`, `event_handler_timeout`, etc. are copied from bus defaults at dispatch time if unset
+- If same event ends up forwarded through multiple buses, it is loop-protected using `event_path`.
+- Dispatch is synchronous and returns immediately with the same event object (`event.event_status` is initially `'pending'`).
+
+Normal lifecycle:
+
+1. Create event instance (`const event = MyEvent({...})`).
+2. Dispatch (`const queued = bus.emit(event)`).
+3. Await with `await queued.done()` (immediate/queue-jump semantics) or `await queued.waitForCompletion()` (bus queue order).
+4. Inspect `queued.event_results`, `queued.event_result`, `queued.event_errors`, etc. if you need to access handler return values
+
+#### `find()`
+
+```ts
+find(event_pattern: EventPattern | '*', options?: FindOptions): Promise
+find(
+ event_pattern: EventPattern | '*',
+ where: (event: T) => boolean,
+ options?: FindOptions
+): Promise
+```
+
+Where:
+
+```ts
+type FindOptions = {
+ past?: boolean | number // true to look through all past events, or number in seconds to filter time range
+ future?: boolean | number // true to wait for event to appear indefinitely, or number in seconds to wait for event to appear
+ child_of?: BaseEvent | null // filter to only match events that are a child_of: some_parent_event
+} & {
+ // event_status: 'pending' | 'started' | 'completed'
+ // event_id: 'some-exact-event-uuid-here',
+ // event_started_at: string | null (exact iso datetime string or null)
+ // ... any event field can be passed to filter events using simple equality checks
+ [key: string]: unknown
+}
+```
+
+`bus.find()` returns the first matching event (in dispatch timestamp order).
+To find multiple matching events, iterate through `bus.event_history.filter((event) => ...some condition...)` manually.
+
+`where` behavior:
+Any filter predicate function in the form of `(event) => true | false`, returning true to consider the event a match.
+
+```ts
+const matching_event = await bus.find(SomeEvent, (event) => event.some_field == 123)
+// or to match all event types:
+const matching_event = await bus.find('*', (event) => event.some_field == 123)
+```
+
+`past` behavior:
+
+- `true`: search all history.
+- `false`: skip searching past event history.
+- `number`: search events dispatched within last `N` seconds.
+
+`future` behavior:
+
+- `true`: wait forever for future match.
+- `false`: do not wait.
+- `number`: wait up to `N` seconds.
+
+Lifecycle use:
+
+- Use for idempotency / de-dupe before dispatch (`past: ...`).
+- Use for synchronization/waiting (`future: ...`).
+- Combine both to "check recent then wait".
+- Add `child_of` to constrain by parent/ancestor event chain.
+- Add any event field (e.g. `event_status`, `event_id`, `event_timeout`, `user_id`) to filter by strict equality.
+- Use wildcard matching with predicates when you want to search all event types: `bus.find('*', (event) => ...)`.
+
+Debouncing expensive events with `find()`:
+
+```ts
+const some_expensive_event = (await bus.find(ExpensiveEvent, { past: 15, future: 5 })) ?? bus.dispatch(ExpensiveEvent({}))
+await some_expensive_event.done()
+```
+
+Important semantics:
+
+- Past lookup matches any dispatched events, not just completed events.
+- Past/future matches resolve as soon as event is dispatched. If you need the completed event, await `event.done()` or pass `{event_status: 'completed'}` to filter only for completed events.
+- If both `past` and `future` are omitted, defaults are `past: true, future: false`.
+- If both `past` and `future` are `false`, it returns `null` immediately.
+- Detailed behavior matrix is covered in `bubus-ts/tests/find.test.ts`.
+
+#### `waitUntilIdle()`
+
+`await bus.waitUntilIdle()` is the normal "drain bus work" call to wait until bus is done processing everything queued.
+
+```ts
+bus.emit(OneEvent(...))
+bus.emit(TwoEvent(...))
+bus.emit(ThreeEvent(...))
+await bus.waitUntilIdle() // this resolves once all three events have finished processing
+```
+
+#### Parent/child/event lookup helpers
+
+```ts
+eventIsChildOf(child_event: BaseEvent, parent_event: BaseEvent): boolean
+eventIsParentOf(parent_event: BaseEvent, child_event: BaseEvent): boolean
+findEventById(event_id: string): BaseEvent | null
+```
+
+#### `toString()` / `toJSON()` / `fromJSON()`
+
+```ts
+toString(): string
+toJSON(): EventBusJSON
+EventBus.fromJSON(data: unknown): EventBus
+```
+
+- `toString()` returns `BusName#abcd` style labels used in logs/errors.
+- `toJSON()` exports full bus state snapshot (config, handlers, indexes, event_history, pending queue, in-flight ids, find-waiter snapshots).
+- `fromJSON()` restores a new bus instance from that payload (handler functions are restored as no-op stubs).
+
+#### `logTree()`
+
+```ts
+logTree(): string
+```
+
+- `logTree()` returns a full event log hierarchy tree diagram for debugging.
+
+#### `destroy()`
+
+```ts
+destroy(): void
+```
+
+- `destroy()` clears handlers/history/locks and removes this bus from global weak registry.
+- `destroy()`/GC behavior is exercised in `bubus-ts/tests/eventbus_basics.test.ts` and `bubus-ts/tests/performance.test.ts`.
+
+### `BaseEvent`
+
+Base class + factory builder for typed event models.
+
+Define your own strongly typed events with `BaseEvent.extend('EventName', {...zod fields...})`:
+
+```ts
+const MyEvent = BaseEvent.extend('MyEvent', {
+ some_key: z.string(),
+ some_other_key: z.number(),
+ // ...
+ // any other payload fields you want to include can go here
+
+ // fields that start with event_* are reserved for metadata used by the library
+ event_result_type: z.string().optional(),
+ event_timeout: 60,
+ // ...
+})
+
+const pending_event = MyEvent({ some_key: 'abc', some_other_key: 234 })
+const queued_event = bus.emit(pending_event)
+const completed_event = await queued_event.done()
+```
+
+API behavior and lifecycle examples:
+
+- `bubus-ts/examples/simple.ts`
+- `bubus-ts/examples/immediate_event_processing.ts`
+- `bubus-ts/examples/forwarding_between_busses.ts`
+- `bubus-ts/tests/eventbus_basics.test.ts`
+- `bubus-ts/tests/find.test.ts`
+- `bubus-ts/tests/first.test.ts`
+- `bubus-ts/tests/event_bus_proxy.test.ts`
+- `bubus-ts/tests/timeout.test.ts`
+- `bubus-ts/tests/event_results.test.ts`
+
+#### Event configuration fields
+
+Special configuration fields you can set on each event to control processing:
+
+- `event_result_type?: z.ZodTypeAny | String | Number | Boolean | Array | Object`
+- `event_version?: string` (default: `'0.0.1'`; useful for your own schema/data migrations)
+- `event_timeout?: number | null`
+- `event_handler_timeout?: number | null`
+- `event_handler_slow_timeout?: number | null`
+- `event_concurrency?: 'global-serial' | 'bus-serial' | 'parallel' | null`
+- `event_handler_concurrency?: 'serial' | 'parallel' | null`
+- `event_handler_completion?: 'all' | 'first'`
+
+#### Runtime state fields
+
+- `event_id`, `event_type`, `event_version`
+- `event_path: string[]` (bus labels like `BusName#ab12`)
+- `event_parent_id: string | null`
+- `event_emitted_by_handler_id: string | null`
+- `event_status: 'pending' | 'started' | 'completed'`
+- `event_results: Map`
+- `event_pending_bus_count: number`
+- `event_created_at: string`, `event_created_ts: number`
+- `event_started_at: string | null`, `event_started_ts: number | null`
+- `event_completed_at: string | null`, `event_completed_ts: number | null`
+
+#### Read-only attributes
+
+- `event_parent` -> `BaseEvent | undefined`
+- `event_children` -> `BaseEvent[]`
+- `event_descendants` -> `BaseEvent[]`
+- `event_errors` -> `Error[]`
+- `all_results` -> `EventResultType[]`
+- `event_result` -> `EventResultType | undefined`
+- `last_result` -> `EventResultType | undefined`
+
+#### `done()`
+
+```ts
+done(): Promise
+```
+
+- `immediate()` is an alias for `done()`.
+- If called from inside a running handler, it queue-jumps child processing immediately.
+- If called outside handler context, it waits for normal completion (or processes immediately if already next).
+- Rejects if event is not attached to a bus (`event has no bus attached`).
+- Queue-jump behavior is demonstrated in `bubus-ts/examples/immediate_event_processing.ts` and `bubus-ts/tests/event_bus_proxy.test.ts`.
+
+#### `waitForCompletion()`
+
+```ts
+waitForCompletion(): Promise
+```
+
+- `finished()` is an alias for `waitForCompletion()`
+- Waits for completion in normal runloop order.
+- Use inside handlers when you explicitly do not want queue-jump behavior.
+
+#### `first()`
+
+```ts
+first(): Promise<EventResultType | undefined>
+```
+
+- Forces `event_handler_completion = 'first'` for this run.
+- Returns temporally first non-`undefined` successful handler result.
+- Cancels pending/running losing handlers on the same bus.
+- Returns `undefined` when no handler produces a successful non-`undefined` value.
+- Cancellation and winner-selection behavior is covered in `bubus-ts/tests/first.test.ts`.
+
+#### `reset()`
+
+```ts
+reset(): this
+```
+
+- Returns a fresh event copy with runtime state reset to pending so it can be dispatched again safely.
+- Original event object is unchanged.
+- Generates a new UUIDv7 `event_id` for the returned copy.
+- Clears runtime completion state (`event_results`, status/timestamps, dispatch context, done signal, local bus binding).
+
+#### `toString()` / `toJSON()` / `fromJSON()`
+
+```ts
+toString(): string
+toJSON(): BaseEventData
+BaseEvent.fromJSON(data: unknown): BaseEvent
+EventFactory.fromJSON?.(data: unknown): TypedEvent
+```
+
+- JSON format is cross-language compatible with Python implementation.
+- `event_result_type` is serialized as JSON Schema when possible and rehydrated on `fromJSON`.
+- In TypeScript-only usage, `event_result_type` can be any Zod schema shape or base type like `number | string | boolean | etc.`. For cross-language roundtrips, object-like schemas (including Python `TypedDict`/`dataclass`-style shapes) are reconstructed on Python as Pydantic models, JSON object keys are always strings, and some fine-grained string-shape constraints may be normalized between Zod and Pydantic.
+- Round-trip coverage is in `bubus-ts/tests/typed_results.test.ts` and `bubus-ts/tests/eventbus_basics.test.ts`.
+
+#### Advanced/internal public methods
+
+Mostly used by bus internals or custom runtimes:
+
+- `markStarted()`
+- `markCancelled(cause)`
+- `markCompleted(force?, notify_parents?)`
+- `createPendingHandlerResults(bus)`
+- `processEvent(pending_entries?)`
+- `cancelPendingDescendants(reason)`
+
+### `EventResult`
+
+Each handler execution creates one `EventResult` stored in `event.event_results`.
+
+#### Main fields
+
+- `id: string` (uuidv7 string)
+- `status: 'pending' | 'started' | 'completed' | 'error'`
+- `event: BaseEvent`
+- `handler: EventHandler`
+- `result: EventResultType | undefined`
+- `error: unknown | undefined`
+- `started_at: string | null` (ISO datetime string)
+- `started_ts: number | null` (monotonic timestamp)
+- `completed_at: string | null` (ISO datetime string)
+- `completed_ts: number | null` (monotonic timestamp)
+- `event_children: BaseEvent[]`
+
+#### Read-only getters
+
+- `event_id` -> `string` uuidv7 of the event the result is for
+- `bus` -> `EventBus` instance it's associated with
+- `handler_id` -> `string` uuidv5 of the `EventHandler`
+- `handler_name` -> `string | 'anonymous'` function name of the handler method
+- `handler_file_path` -> `string | null` path/to/file.js:lineno where the handler method is defined
+- `eventbus_name` -> `string` name, same as `this.bus.name`
+- `eventbus_id` -> `string` uuidv7, same as `this.bus.id`
+- `eventbus_label` -> `string` label, same as `this.bus.label`
+- `value` -> `EventResultType | undefined` alias of `this.result`
+- `raw_value` -> `any` raw result value before schema validation, available when handler return value validation fails
+- `handler_timeout` -> `number` seconds before handler execution is aborted (precedence: handler config -> event config -> bus level defaults)
+- `handler_slow_timeout` -> `number` seconds before logging a slow execution warning (same precedence as `handler_timeout`)
+
+#### Advanced/Internal methods
+
+```ts
+markStarted(): Promise
+markCompleted(result): void
+markError(error): void
+
+runHandler(): Promise
+signalAbort(error: Error): void
+linkEmittedChildEvent(child_event): void
+```
+
+#### `toString()` / `toJSON()` / `fromJSON()`
+
+```ts
+toString(): string
+toJSON(): EventResultJSON
+EventResult.fromJSON(event, data): EventResult
+```
+
+### `EventHandler`
+
+Represents one registered handler entry on a bus. You usually get these from `bus.on(...)`, then pass them to `bus.off(...)` to remove.
+
+#### Main fields
+
+- `id` unique handler UUIDv5 (deterministic hash from bus/event/handler metadata unless overridden)
+- `handler` function reference that executes for matching events
+- `handler_name` function name (or `'anonymous'`)
+- `handler_file_path` detected source path (`~/path/file.ts:line`) or `null`
+- `handler_timeout` optional timeout override in seconds (`null` disables timeout limit)
+- `handler_slow_timeout` optional slow-warning threshold in seconds (`null` disables slow warning)
+- `handler_registered_at` ISO timestamp
+- `handler_registered_ts` monotonic timestamp
+- `event_pattern` subscribed key (`'SomeEvent'` or `'*'`)
+- `eventbus_name` bus name where this handler was registered
+- `eventbus_id` bus UUID where this handler was registered
+
+#### `toString()` / `toJSON()` / `fromJSON()`
+
+```ts
+toString(): string
+toJSON(): EventHandlerJSON
+EventHandler.fromJSON(data: unknown, handler?: EventHandlerFunction): EventHandler
+```
+
+- `toString()` returns `handlerName() (path:line)` when path/name are available, otherwise `function#abcd()`.
+- `toJSON()` emits only serializable handler metadata (never function bodies).
+- `fromJSON()` reconstructs the handler entry and accepts an optional real function to re-bind execution behavior.
+
+
+
+---
+
+
+
+## π§΅ Advanced Concurrency Control
+
+### Concurrency Config Options
+
+#### Bus-level config options (`new EventBus(name, {...options...})`)
+
+- `max_history_size?: number | null` (default: `100`)
+ - Max events kept in history. `null` = unlimited. `bus.find(...)` uses this log to query recently dispatched events
+ - `0` keeps only pending/in-flight events; each event is removed from history immediately after completion.
+- `max_history_drop?: boolean` (default: `false`)
+ - If `true`, drop oldest history entries when history is full (including uncompleted entries if needed).
+ - If `false`, reject new dispatches when history is full.
+- `event_concurrency?: 'global-serial' | 'bus-serial' | 'parallel' | null` (default: `'bus-serial'`)
+ - Event-level scheduling policy (`global-serial`: FIFO across all buses, `bus-serial`: FIFO per bus, `parallel`: concurrent events per bus).
+- `event_handler_concurrency?: 'serial' | 'parallel' | null` (default: `'serial'`)
+ - Handler-level scheduling policy for each event (`serial`: one handler at a time per event, `parallel`: all handlers for the event can run concurrently).
+- `event_handler_completion?: 'all' | 'first'` (default: `'all'`)
+ - Completion strategy (`all`: wait for all handlers, `first`: stop after first non-`undefined` result).
+- `event_timeout?: number | null` (default: `60`)
+ - Default handler timeout budget in seconds.
+- `event_handler_slow_timeout?: number | null` (default: `30`)
+ - Slow-handler warning threshold in seconds.
+- `event_slow_timeout?: number | null` (default: `300`)
+ - Slow-event warning threshold in seconds.
+
+#### Event-level config options
+
+Override the bus defaults on a per-event basis by using these special fields in the event:
+
+```ts
+const event = MyEvent({
+ event_concurrency: 'parallel',
+ event_handler_concurrency: 'parallel',
+ event_handler_completion: 'first',
+ event_timeout: 10,
+ event_handler_timeout: 3,
+})
+```
+
+Notes:
+
+- `null` means "inherit/fall back to bus default" for event-level concurrency and timeout fields.
+- Forwarded events are processed under the target bus's config; source bus config is not inherited.
+- `event_handler_completion` is independent from handler scheduling mode (`serial` vs `parallel`).
+
+#### Handler-level config options
+
+Set at registration:
+
+```ts
+bus.on(MyEvent, handler, { handler_timeout: 2 }) // max time in seconds this handler is allowed to run before it's aborted
+```
+
+#### Precedence and interaction
+
+Event and handler concurrency precedence:
+
+1. Event instance override (`event.event_concurrency`, `event.event_handler_concurrency`)
+2. Bus defaults (`EventBus` options)
+3. Built-in defaults (`bus-serial`, `serial`)
+
+Timeout resolution for each handler run:
+
+1. Resolve handler timeout source:
+ - `bus.on(..., { handler_timeout })`
+ - else `event.event_handler_timeout`
+ - else bus `event_timeout`
+2. Apply event cap:
+ - effective timeout is `min(resolved_handler_timeout, event.event_timeout)` when both are non-null
+ - if either is `null`, the non-null value wins; both null means no timeout
+
+Additional timeout nuance:
+
+- `BaseEvent.event_timeout` starts as `null` unless set; dispatch applies bus default timeout when still unset.
+- Bus/event timeouts are outer budgets for handler execution; use `@retry({ timeout })` for per-attempt timeouts.
+
+Use `@retry` for per-handler execution timeout/retry/backoff/semaphore control. Keep bus/event timeouts as outer execution budgets.
+
+### Runtime lifecycle (bus -> event -> handler)
+
+Dispatch flow:
+
+1. `dispatch()` normalizes to original event and captures async context when available.
+2. Bus applies defaults and appends itself to `event_path`.
+3. Event enters `event_history`, `pending_event_queue`, and runloop starts.
+4. Runloop dequeues and calls `processEvent()`.
+5. Event-level semaphore (`event_concurrency`) is applied.
+6. Handler results are created and executed under handler-level semaphore (`event_handler_concurrency`).
+7. Event completion and child completion propagate through `event_pending_bus_count` and result states.
+8. History trimming evicts completed events first; if still over limit, oldest pending events can be dropped (with warning), then cleanup runs.
+
+Locking model:
+
+- Global event semaphore: `global-serial`
+- Bus event semaphore: `bus-serial`
+- Per-event handler semaphore: `serial` handler mode
+
+### Queue-jumping (`await event.done()` inside handlers)
+
+Want to dispatch and await an event like a function call? simply `await event.done()`.
+When called inside a handler, the awaited event is processed immediately (queue-jump behavior) before normal queued work continues.
+
+### `@retry` Decorator
+
+`retry()` adds retry logic and optional semaphore-based concurrency limiting to async functions/handlers.
+
+#### Why retry is handler-level
+
+Retry and timeout belong on handlers, not emit sites:
+
+- Handlers fail; events are messages.
+- Handler-level retries preserve replay semantics (one event dispatch, internal retry attempts).
+- Bus concurrency and retry concerns are orthogonal and compose cleanly.
+
+#### Recommended pattern: `@retry()` on class methods
+
+```ts
+import { retry, EventBus } from 'bubus'
+
+class ScreenshotService {
+ constructor(private bus: InstanceType<typeof EventBus>) {
+ bus.on(ScreenshotRequestEvent, this.onScreenshot.bind(this))
+ }
+
+ @retry({
+ max_attempts: 4,
+ retry_on_errors: [/timeout/i],
+ timeout: 5,
+ semaphore_scope: 'global',
+ semaphore_name: 'Screenshots',
+ semaphore_limit: 2,
+ })
+ async onScreenshot(event: InstanceType<typeof ScreenshotRequestEvent>): Promise<string> {
+ return await takeScreenshot(event.data.url)
+ }
+}
+
+const ev = bus.emit(ScreenshotRequestEvent({ url: 'https://example.com' }))
+await ev.done()
+```
+
+#### Also works: inline HOF
+
+```ts
+bus.on(
+ MyEvent,
+ retry({ max_attempts: 3, timeout: 10 })(async (event) => {
+ await riskyOperation(event.data)
+ })
+)
+```
+
+#### Options
+
+| Option | Type | Default | Description |
+| ---------------------- | ----------------------------------------- | ----------- | ----------------------------------------------- |
+| `max_attempts` | `number` | `1` | Total attempts including first call. |
+| `retry_after` | `number` | `0` | Seconds between retries. |
+| `retry_backoff_factor` | `number` | `1.0` | Multiplier for retry delay. |
+| `retry_on_errors` | `(ErrorClass \| string \| RegExp)[]` | `undefined` | Retry filter. `undefined` retries on any error. |
+| `timeout` | `number \| null` | `undefined` | Per-attempt timeout in seconds. |
+| `semaphore_limit` | `number \| null` | `undefined` | Max concurrent executions sharing semaphore. |
+| `semaphore_name` | `string \| ((...args) => string) \| null` | fn name | Semaphore key. |
+| `semaphore_lax` | `boolean` | `true` | Continue if semaphore acquisition times out. |
+| `semaphore_scope` | `'global' \| 'class' \| 'instance'` | `'global'` | Scope for semaphore identity. |
+| `semaphore_timeout` | `number \| null` | `undefined` | Max seconds waiting for semaphore. |
+
+#### Error types
+
+- `RetryTimeoutError`: per-attempt timeout exceeded.
+- `SemaphoreTimeoutError`: semaphore acquisition timeout (`semaphore_lax=false`).
+
+#### Re-entrancy
+
+On Node.js/Bun, `AsyncLocalStorage` tracks held semaphores and avoids deadlocks for nested calls using the same semaphore.
+In browsers, this tracking is unavailable; avoid recursive/nested same-semaphore patterns there.
+
+#### Interaction with bus concurrency
+
+Execution order when used on bus handlers:
+
+1. Bus acquires handler semaphore (`event_handler_concurrency`)
+2. `retry()` acquires retry semaphore (if configured)
+3. Handler executes (with retries)
+4. `retry()` releases retry semaphore
+5. Bus releases handler semaphore
+
+Use bus/event timeouts for outer deadlines and `retry({ timeout })` for per-handler-attempt deadlines.
+
+#### Discouraged: retrying emit sites
+
+Avoid wrapping `emit()/done()` in `retry()` unless you intentionally want multiple event dispatches (a new event for every retry).
+Keep retries on handlers so that your logs represent the original high-level intent, with a single event per call even if handling it took multiple tries.
+Emitting a new event for each retry is only recommended if you are using the logs for debugging more than for replayability / time-travel.
+
+
+
+---
+
+
+
+## Bridges
+
+Bridges are optional connectors that let you send/receive events to/from an external service; you do not need a bridge to use bubus, since it is normally purely in-memory. These are just simple helpers to forward bubus event JSON to storage engines / other processes / other machines; they prevent loops automatically, but beyond that it's only basic forwarding with no handler pickling or anything fancy.
+
+Bridges all expose a very simple bus-like API with only `.emit()` and `.on()`.
+
+**Example usage: link a bus to a redis pub/sub channel**
+
+```ts
+const bridge = new RedisEventBridge('redis://redis@localhost:6379')
+
+bus.on('*', bridge.emit) // listen for all events on bus and send them to redis channel
+bridge.on('*', bus.emit) // listen for new events in redis channel and dispatch them to our bus
+```
+
+- `new SocketEventBridge('/tmp/bubus_events.sock')`
+- `new HTTPEventBridge({ send_to: 'https://127.0.0.1:8001/bubus_events', listen_on: 'http://0.0.0.0:8002/bubus_events' })`
+- `new JSONLEventBridge('/tmp/bubus_events.jsonl')`
+- `new SQLiteEventBridge('/tmp/bubus_events.sqlite3')`
+- `new PostgresEventBridge('postgresql://user:pass@localhost:5432/dbname/bubus_events')`
+- `new RedisEventBridge('redis://user:pass@localhost:6379/1/bubus_events')`
+- `new NATSEventBridge('nats://localhost:4222', 'bubus_events')`
+
+
+
+---
+
+
+
+## π Runtimes
+
+`bubus-ts` supports all major JS runtimes.
+
+- Node.js (default development and test runtime)
+- Browsers (ESM)
+- Bun
+- Deno
+
+### Browser support notes
+
+- The package output is ESM (`./dist/esm`) which is supported by all browsers [released after 2018](https://caniuse.com/?search=ESM)
+- `AsyncLocalStorage` is preserved at dispatch and used during handling when available (Node/Bun); OTel/tracing context will work normally in those environments
+
+### Performance comparison (local run, per-event)
+
+Measured locally on an `Apple M4 Pro` with:
+
+- `pnpm run perf:node` (`node v22.21.1`)
+- `pnpm run perf:bun` (`bun v1.3.9`)
+- `pnpm run perf:deno` (`deno v2.6.8`)
+- `pnpm run perf:browser` (`chrome v145.0.7632.6`)
+
+| Runtime | 1 bus x 50k events x 1 handler | 500 busses x 100 events x 1 handler | 1 bus x 1 event x 50k parallel handlers | 1 bus x 50k events x 50k one-off handlers | Worst case (N busses x N events x N handlers) |
+| ------------------ | ------------------------------ | ----------------------------------- | --------------------------------------- | ----------------------------------------- | --------------------------------------------- |
+| Node | `0.015ms/event`, `0.6kb/event` | `0.058ms/event`, `0.1kb/event` | `0.021ms/handler`, `3.8kb/handler` | `0.028ms/event`, `0.6kb/event` | `0.442ms/event`, `0.9kb/event` |
+| Bun | `0.011ms/event`, `2.5kb/event` | `0.054ms/event`, `1.0kb/event` | `0.006ms/handler`, `4.5kb/handler` | `0.019ms/event`, `2.8kb/event` | `0.441ms/event`, `3.1kb/event` |
+| Deno | `0.018ms/event`, `1.2kb/event` | `0.063ms/event`, `0.4kb/event` | `0.024ms/handler`, `3.1kb/handler` | `0.064ms/event`, `2.6kb/event` | `0.461ms/event`, `7.9kb/event` |
+| Browser (Chromium) | `0.030ms/event` | `0.197ms/event` | `0.022ms/handler` | `0.022ms/event` | `1.566ms/event` |
+
+Notes:
+
+- `kb/event` is peak RSS delta per event during active processing (most representative of OS-visible RAM in Activity Monitor / Task Manager, with `EventBus.max_history_size=1`)
+- In `1 bus x 1 event x 50k parallel handlers` stats are shown per-handler for clarity, `0.02ms/handler * 50k handlers ~= 1000ms` for the entire event
+- Browser runtime does not expose memory usage directly, in practice memory performance in-browser is comparable to Node (they both use V8)
+
+
+
+---
+
+
+
+## πΎ Development
+
+```bash
+git clone https://github.com/pirate/bbus bubus && cd bubus
+
+cd ./bubus-ts
+pnpm install
+pnpm lint
+pnpm test
+```
diff --git a/bubus-ts/eslint.config.js b/bubus-ts/eslint.config.js
new file mode 100644
index 0000000..458a8b7
--- /dev/null
+++ b/bubus-ts/eslint.config.js
@@ -0,0 +1,25 @@
+import ts_parser from '@typescript-eslint/parser'
+import ts_eslint_plugin from '@typescript-eslint/eslint-plugin'
+
+export default [
+ {
+ ignores: ['dist/**', 'README.md'],
+ },
+ {
+ files: ['**/*.ts'],
+ languageOptions: {
+ parser: ts_parser,
+ parserOptions: {
+ sourceType: 'module',
+ ecmaVersion: 'latest',
+ },
+ },
+ plugins: {
+ '@typescript-eslint': ts_eslint_plugin,
+ },
+ rules: {
+ 'no-unused-vars': 'off',
+ '@typescript-eslint/no-unused-vars': ['error', { argsIgnorePattern: '^_' }],
+ },
+ },
+]
diff --git a/bubus-ts/examples/concurrency_options.ts b/bubus-ts/examples/concurrency_options.ts
new file mode 100755
index 0000000..0f587ad
--- /dev/null
+++ b/bubus-ts/examples/concurrency_options.ts
@@ -0,0 +1,222 @@
+#!/usr/bin/env -S node --import tsx
+// Run: node --import tsx examples/concurrency_options.ts
+
+import { z } from 'zod'
+import { BaseEvent, EventBus, EventHandlerTimeoutError } from '../src/index.js'
+const sleep = (ms: number): Promise<void> =>
+ new Promise((resolve) => {
+ setTimeout(resolve, ms)
+ })
+
+const makeLogger = (section: string) => {
+ const started_at = performance.now()
+ return (message: string) => {
+ const elapsed = (performance.now() - started_at).toFixed(1)
+ console.log(`[${section}] +${elapsed}ms ${message}`)
+ }
+}
+const WorkEvent = BaseEvent.extend('ConcurrencyOptionsWorkEvent', { lane: z.string(), order: z.number(), ms: z.number() })
+const HandlerEvent = BaseEvent.extend('ConcurrencyOptionsHandlerEvent', { label: z.string() })
+const OverrideEvent = BaseEvent.extend('ConcurrencyOptionsOverrideEvent', { label: z.string(), order: z.number(), ms: z.number() })
+const TimeoutEvent = BaseEvent.extend('ConcurrencyOptionsTimeoutEvent', { ms: z.number() })
+
+// 1) Event concurrency at bus level: global-serial vs bus-serial.
+// Observe how max in-flight events differs across two buses.
+async function eventConcurrencyDemo(): Promise<void> {
+ const global_log = makeLogger('event:global-serial')
+ const global_a = new EventBus('GlobalSerialA', { event_concurrency: 'global-serial', event_handler_concurrency: 'serial' })
+ const global_b = new EventBus('GlobalSerialB', { event_concurrency: 'global-serial', event_handler_concurrency: 'serial' })
+ let global_in_flight = 0
+ let global_max = 0
+  const global_handler = async (event: InstanceType<typeof WorkEvent>) => {
+ global_in_flight += 1
+ global_max = Math.max(global_max, global_in_flight)
+ global_log(`${event.lane}${event.order} start (global in-flight=${global_in_flight})`)
+ await sleep(event.ms)
+ global_log(`${event.lane}${event.order} end`)
+ global_in_flight -= 1
+ }
+ global_a.on(WorkEvent, global_handler)
+ global_b.on(WorkEvent, global_handler)
+ global_a.emit(WorkEvent({ lane: 'A', order: 0, ms: 45 }))
+ global_b.emit(WorkEvent({ lane: 'B', order: 0, ms: 45 }))
+ global_a.emit(WorkEvent({ lane: 'A', order: 1, ms: 45 }))
+ global_b.emit(WorkEvent({ lane: 'B', order: 1, ms: 45 }))
+ await Promise.all([global_a.waitUntilIdle(), global_b.waitUntilIdle()])
+ global_log(`max in-flight across both buses: ${global_max} (expect 1 in global-serial)`)
+ console.log('\n=== global_a.logTree() ===')
+ console.log(global_a.logTree())
+ console.log('\n=== global_b.logTree() ===')
+ console.log(global_b.logTree())
+ const bus_log = makeLogger('event:bus-serial')
+ const bus_a = new EventBus('BusSerialA', { event_concurrency: 'bus-serial', event_handler_concurrency: 'serial' })
+ const bus_b = new EventBus('BusSerialB', { event_concurrency: 'bus-serial', event_handler_concurrency: 'serial' })
+ const per_bus_in_flight = { A: 0, B: 0 }
+ const per_bus_max = { A: 0, B: 0 }
+ let mixed_global_in_flight = 0
+ let mixed_global_max = 0
+  const bus_handler = async (event: InstanceType<typeof WorkEvent>) => {
+ const lane = event.lane as 'A' | 'B'
+ mixed_global_in_flight += 1
+ mixed_global_max = Math.max(mixed_global_max, mixed_global_in_flight)
+ per_bus_in_flight[lane] += 1
+ per_bus_max[lane] = Math.max(per_bus_max[lane], per_bus_in_flight[lane])
+ bus_log(`${lane}${event.order} start (global=${mixed_global_in_flight}, lane=${per_bus_in_flight[lane]})`)
+ await sleep(event.ms)
+ bus_log(`${lane}${event.order} end`)
+ per_bus_in_flight[lane] -= 1
+ mixed_global_in_flight -= 1
+ }
+ bus_a.on(WorkEvent, bus_handler)
+ bus_b.on(WorkEvent, bus_handler)
+ bus_a.emit(WorkEvent({ lane: 'A', order: 0, ms: 45 }))
+ bus_b.emit(WorkEvent({ lane: 'B', order: 0, ms: 45 }))
+ bus_a.emit(WorkEvent({ lane: 'A', order: 1, ms: 45 }))
+ bus_b.emit(WorkEvent({ lane: 'B', order: 1, ms: 45 }))
+ await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()])
+ bus_log(`max in-flight global=${mixed_global_max}, per-bus A=${per_bus_max.A}, B=${per_bus_max.B} (expect global >= 2, per-bus = 1)`)
+ console.log('\n=== bus_a.logTree() ===')
+ console.log(bus_a.logTree())
+ console.log('\n=== bus_b.logTree() ===')
+ console.log(bus_b.logTree())
+}
+
+// 2) Handler concurrency at bus level: serial vs parallel on the same event.
+// Observe handler overlap for one event with two handlers.
+async function handlerConcurrencyDemo(): Promise<void> {
+ const run_case = async (mode: 'serial' | 'parallel') => {
+ const log = makeLogger(`handler:${mode}`)
+ const bus = new EventBus(`HandlerMode-${mode}`, { event_concurrency: 'parallel', event_handler_concurrency: mode })
+ let in_flight = 0
+ let max_in_flight = 0
+    const make_handler = (name: string, ms: number) => async (event: InstanceType<typeof HandlerEvent>) => {
+ in_flight += 1
+ max_in_flight = Math.max(max_in_flight, in_flight)
+ log(`${event.label}:${name} start (handlers in-flight=${in_flight})`)
+ await sleep(ms)
+ log(`${event.label}:${name} end`)
+ in_flight -= 1
+ }
+ bus.on(HandlerEvent, make_handler('slow', 60))
+ bus.on(HandlerEvent, make_handler('fast', 20))
+ const event = bus.emit(HandlerEvent({ label: mode }))
+ await event.done()
+ await bus.waitUntilIdle()
+ log(`max handler overlap: ${max_in_flight} (expect 1 for serial, >= 2 for parallel)`)
+ console.log(`\n=== ${bus.name}.logTree() ===`)
+ console.log(bus.logTree())
+ }
+ await run_case('serial')
+ await run_case('parallel')
+}
+
+// 3) Event-level overrides take precedence over bus defaults.
+// Bus defaults are strict (bus-serial + serial), then we override both to parallel on event instances.
+async function eventOverrideDemo(): Promise<void> {
+ const log = makeLogger('override:precedence')
+ const bus = new EventBus('OverrideBus', { event_concurrency: 'bus-serial', event_handler_concurrency: 'serial' })
+  let active_events = new Set<string>()
+  let per_event_handlers = new Map<string, number>()
+ let active_handlers = 0
+ let max_handlers = 0
+ let max_events = 0
+
+ const reset_metrics = () => {
+ active_events = new Set()
+ per_event_handlers = new Map()
+ active_handlers = 0
+ max_handlers = 0
+ max_events = 0
+ }
+  const track_start = (event: InstanceType<typeof OverrideEvent>, handler_name: string, label: string) => {
+ active_handlers += 1
+ max_handlers = Math.max(max_handlers, active_handlers)
+ const count = (per_event_handlers.get(event.event_id) ?? 0) + 1
+ per_event_handlers.set(event.event_id, count)
+ active_events.add(event.event_id)
+ max_events = Math.max(max_events, active_events.size)
+ log(`${label}:${event.order}:${handler_name} start (events=${active_events.size}, handlers=${active_handlers})`)
+ }
+  const track_end = (event: InstanceType<typeof OverrideEvent>, handler_name: string, label: string) => {
+ active_handlers -= 1
+ const count = (per_event_handlers.get(event.event_id) ?? 1) - 1
+ if (count <= 0) {
+ per_event_handlers.delete(event.event_id)
+ active_events.delete(event.event_id)
+ } else {
+ per_event_handlers.set(event.event_id, count)
+ }
+ log(`${label}:${event.order}:${handler_name} end`)
+ }
+
+ const run_pair = async (label: string, use_override: boolean) => {
+ reset_metrics()
+    const handler_a = async (event: InstanceType<typeof OverrideEvent>) => {
+ track_start(event, 'A', label)
+ await sleep(event.ms)
+ track_end(event, 'A', label)
+ }
+    const handler_b = async (event: InstanceType<typeof OverrideEvent>) => {
+ track_start(event, 'B', label)
+ await sleep(event.ms)
+ track_end(event, 'B', label)
+ }
+ bus.off(OverrideEvent)
+ bus.on(OverrideEvent, handler_a)
+ bus.on(OverrideEvent, handler_b)
+ const overrides = use_override ? ({ event_concurrency: 'parallel', event_handler_concurrency: 'parallel' } as const) : {}
+ bus.emit(OverrideEvent({ label, order: 0, ms: 45, ...overrides }))
+ bus.emit(OverrideEvent({ label, order: 1, ms: 45, ...overrides }))
+ await bus.waitUntilIdle()
+ log(`${label} summary -> max events=${max_events}, max handlers=${max_handlers}`)
+ }
+
+ await run_pair('bus-defaults', false)
+ await run_pair('event-overrides', true)
+ console.log('\n=== OverrideBus.logTree() ===')
+ console.log(bus.logTree())
+}
+
+// 4) Handler-level timeout via bus.on(..., { handler_timeout }).
+// Observe one handler timing out while another succeeds on the same event.
+async function handlerTimeoutDemo(): Promise<void> {
+ const log = makeLogger('timeout:handler-option')
+ const bus = new EventBus('TimeoutBus', { event_concurrency: 'parallel', event_handler_concurrency: 'parallel', event_timeout: 0.2 })
+
+ const slow_entry = bus.on(
+ TimeoutEvent,
+ async (event) => {
+ log('slow handler start')
+ await sleep(event.ms)
+ log('slow handler finished body (but may already be timed out)')
+ return 'slow'
+ },
+ { handler_timeout: 0.03 }
+ )
+ bus.on(
+ TimeoutEvent,
+ async () => {
+ log('fast handler start')
+ await sleep(10)
+ log('fast handler end')
+ return 'fast'
+ },
+ { handler_timeout: 0.1 }
+ )
+ const event = bus.emit(TimeoutEvent({ ms: 60, event_handler_timeout: 0.5 }))
+ await event.done()
+ const slow_result = event.event_results.get(slow_entry.id)
+ const handler_timed_out = slow_result?.error instanceof EventHandlerTimeoutError
+ log(`slow handler status=${slow_result?.status}, timeout_error=${handler_timed_out ? 'yes' : 'no'}`)
+ await bus.waitUntilIdle()
+ console.log('\n=== TimeoutBus.logTree() ===')
+ console.log(bus.logTree())
+}
+
+async function main(): Promise<void> {
+ await eventConcurrencyDemo()
+ await handlerConcurrencyDemo()
+ await eventOverrideDemo()
+ await handlerTimeoutDemo()
+}
+await main()
diff --git a/bubus-ts/examples/forwarding_between_busses.ts b/bubus-ts/examples/forwarding_between_busses.ts
new file mode 100755
index 0000000..49f7361
--- /dev/null
+++ b/bubus-ts/examples/forwarding_between_busses.ts
@@ -0,0 +1,96 @@
+#!/usr/bin/env -S node --import tsx
+// Run: node --import tsx examples/forwarding_between_busses.ts
+
+import { z } from 'zod'
+
+import { BaseEvent, EventBus } from '../src/index.js'
+
+const ForwardedEvent = BaseEvent.extend('ForwardedEvent', {
+ message: z.string(),
+})
+
+async function main(): Promise<void> {
+ const busA = new EventBus('BusA')
+ const busB = new EventBus('BusB')
+ const busC = new EventBus('BusC')
+
+ const handleCounts = {
+ BusA: 0,
+ BusB: 0,
+ BusC: 0,
+ }
+
+ const seenEventIds = {
+    BusA: new Set<string>(),
+    BusB: new Set<string>(),
+    BusC: new Set<string>(),
+ }
+
+ // Each bus handles the typed event locally.
+ // In a forwarding cycle, loop prevention should keep each bus to one handle.
+ busA.on(ForwardedEvent, (event) => {
+ handleCounts.BusA += 1
+ seenEventIds.BusA.add(event.event_id)
+ console.log(`[BusA] handled ${event.event_id} (count=${handleCounts.BusA})`)
+ })
+
+ busB.on(ForwardedEvent, (event) => {
+ handleCounts.BusB += 1
+ seenEventIds.BusB.add(event.event_id)
+ console.log(`[BusB] handled ${event.event_id} (count=${handleCounts.BusB})`)
+ })
+
+ busC.on(ForwardedEvent, (event) => {
+ handleCounts.BusC += 1
+ seenEventIds.BusC.add(event.event_id)
+ console.log(`[BusC] handled ${event.event_id} (count=${handleCounts.BusC})`)
+ })
+
+ // Forward all events in a ring:
+ // A -> B -> C -> A
+ // Expected for one dispatch from A: event path becomes [A, B, C] and stops.
+ // The C -> A edge is skipped because A is already in event_path.
+ busA.on('*', busB.emit)
+ busB.on('*', busC.emit)
+ busC.on('*', busA.emit)
+
+ console.log('Dispatching ForwardedEvent on BusA with cyclic forwarding A -> B -> C -> A')
+
+ const event = busA.emit(
+ ForwardedEvent({
+ message: 'hello across 3 buses',
+ })
+ )
+
+ // done() waits for handlers on all forwarded buses, not just the origin bus.
+ await event.done()
+ await Promise.all([busA.waitUntilIdle(), busB.waitUntilIdle(), busC.waitUntilIdle()])
+
+ const path = event.event_path
+ const totalHandles = handleCounts.BusA + handleCounts.BusB + handleCounts.BusC
+
+ console.log('\nFinal propagation summary:')
+ console.log(`- event_id: ${event.event_id}`)
+ console.log(`- event_path: ${path.join(' -> ')}`)
+ console.log(`- handle counts: ${JSON.stringify(handleCounts)}`)
+ console.log(`- unique ids seen per bus: A=${seenEventIds.BusA.size}, B=${seenEventIds.BusB.size}, C=${seenEventIds.BusC.size}`)
+ console.log(`- total handles: ${totalHandles}`)
+
+ const handledOncePerBus = handleCounts.BusA === 1 && handleCounts.BusB === 1 && handleCounts.BusC === 1
+ const visitedThreeBuses = path.length === 3
+
+ if (handledOncePerBus && visitedThreeBuses) {
+ console.log('\nLoop prevention confirmed: each bus handled the event at most once.')
+ } else {
+ console.log('\nUnexpected forwarding result. Check handlers/forwarding setup.')
+ }
+
+ console.log('\n=== BusA logTree() ===')
+ console.log(busA.logTree())
+ console.log('\n=== BusB logTree() ===')
+ console.log(busB.logTree())
+ console.log('\n=== BusC logTree() ===')
+ console.log(busC.logTree())
+}
+
+await main()
diff --git a/bubus-ts/examples/immediate_event_processing.ts b/bubus-ts/examples/immediate_event_processing.ts
new file mode 100755
index 0000000..6d52095
--- /dev/null
+++ b/bubus-ts/examples/immediate_event_processing.ts
@@ -0,0 +1,138 @@
+#!/usr/bin/env -S node --import tsx
+// Run: node --import tsx examples/immediate_event_processing.ts
+
+import { z } from 'zod'
+
+import { BaseEvent, EventBus } from '../src/index.js'
+
+// Parent handler runs two scenarios:
+// 1) await child.done() -> immediate queue-jump processing
+// 2) await child.waitForCompletion() -> normal queue processing
+const ParentEvent = BaseEvent.extend('ImmediateProcessingParentEvent', {
+ mode: z.enum(['immediate', 'queued']),
+})
+
+const ChildEvent = BaseEvent.extend('ImmediateProcessingChildEvent', {
+ scenario: z.enum(['immediate', 'queued']),
+})
+
+const SiblingEvent = BaseEvent.extend('ImmediateProcessingSiblingEvent', {
+ scenario: z.enum(['immediate', 'queued']),
+})
+
+const delay = (ms: number): Promise<void> =>
+ new Promise((resolve) => {
+ setTimeout(resolve, ms)
+ })
+
+type Scenario = 'immediate' | 'queued'
+
+async function main(): Promise<void> {
+ // Two buses: bus_a is the source, bus_b is the forward target.
+ const bus_a = new EventBus('QueueJumpDemoA', {
+ event_concurrency: 'bus-serial',
+ event_handler_concurrency: 'serial',
+ })
+ const bus_b = new EventBus('QueueJumpDemoB', {
+ event_concurrency: 'bus-serial',
+ event_handler_concurrency: 'serial',
+ })
+
+ // Simple step counter so ordering is easy to read in stdout.
+ let step = 0
+ const log = (message: string): void => {
+ step += 1
+ console.log(`${String(step).padStart(2, '0')}. ${message}`)
+ }
+
+ // Forwarding setup: both sibling/child events emitted on bus_a are forwarded to bus_b.
+ bus_a.on(ChildEvent, (event) => {
+ log(`[forward] ${event.event_type}(${event.scenario}) bus_a -> bus_b`)
+ bus_b.emit(event)
+ })
+ bus_a.on(SiblingEvent, (event) => {
+ log(`[forward] ${event.event_type}(${event.scenario}) bus_a -> bus_b`)
+ bus_b.emit(event)
+ })
+
+ // Local handlers on bus_a.
+ bus_a.on(ChildEvent, async (event) => {
+ log(`[bus_a] child start (${event.scenario})`)
+ await delay(8)
+ log(`[bus_a] child end (${event.scenario})`)
+ })
+ bus_a.on(SiblingEvent, async (event) => {
+ log(`[bus_a] sibling start (${event.scenario})`)
+ await delay(14)
+ log(`[bus_a] sibling end (${event.scenario})`)
+ })
+
+ // Forwarded handlers on bus_b.
+ bus_b.on(ChildEvent, async (event) => {
+ log(`[bus_b] child start (${event.scenario})`)
+ await delay(4)
+ log(`[bus_b] child end (${event.scenario})`)
+ })
+ bus_b.on(SiblingEvent, async (event) => {
+ log(`[bus_b] sibling start (${event.scenario})`)
+ await delay(6)
+ log(`[bus_b] sibling end (${event.scenario})`)
+ })
+
+ // Parent handler queues sibling first, then child, then compares await behavior.
+ bus_a.on(ParentEvent, async (event) => {
+ log(`[parent:${event.mode}] start`)
+
+ // Queue a sibling first so normal queue order has sibling ahead of child.
+ event.bus?.emit(SiblingEvent({ scenario: event.mode }))
+ log(`[parent:${event.mode}] sibling queued`)
+
+ // Queue child second; this is the event we await in two different ways.
+ const child = event.bus?.emit(ChildEvent({ scenario: event.mode }))!
+ log(`[parent:${event.mode}] child queued`)
+
+ if (event.mode === 'immediate') {
+ // Queue-jump: child processes immediately while still inside parent handler.
+ log(`[parent:${event.mode}] await child.done()`)
+ await child.done()
+ log(`[parent:${event.mode}] child.done() resolved`)
+ } else {
+ // Normal queue wait: child waits its turn behind already-queued sibling work.
+ log(`[parent:${event.mode}] await child.waitForCompletion()`)
+ await child.waitForCompletion()
+ log(`[parent:${event.mode}] child.waitForCompletion() resolved`)
+ }
+
+ log(`[parent:${event.mode}] end`)
+ })
+
+  const runScenario = async (mode: Scenario): Promise<void> => {
+ log(`----- scenario=${mode} -----`)
+
+ // Parent event uses parallel concurrency so waitForCompletion() in handler
+ // can wait safely while other queued events continue to run.
+ const parent = bus_a.emit(
+ ParentEvent({
+ mode,
+ event_concurrency: 'parallel',
+ })
+ )
+
+ await parent.waitForCompletion()
+ await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()])
+ log(`----- done scenario=${mode} -----`)
+ }
+
+ await runScenario('immediate')
+ await runScenario('queued')
+
+ console.log('\nExpected behavior:')
+ console.log('- immediate: child runs before sibling (queue-jump) and parent resumes right after child.')
+ console.log('- queued: sibling runs first, child waits in normal queue order, parent resumes later.')
+ console.log('\n=== bus_a.logTree() ===')
+ console.log(bus_a.logTree())
+ console.log('\n=== bus_b.logTree() ===')
+ console.log(bus_b.logTree())
+}
+
+await main()
diff --git a/bubus-ts/examples/log_tree_demo.ts b/bubus-ts/examples/log_tree_demo.ts
new file mode 100755
index 0000000..e369011
--- /dev/null
+++ b/bubus-ts/examples/log_tree_demo.ts
@@ -0,0 +1,95 @@
+import { z } from 'zod'
+
+import { BaseEvent, EventBus } from '../src/index.js'
+
+const RootEvent = BaseEvent.extend('RootEvent', {
+ url: z.string(),
+ event_result_type: z.string(),
+})
+
+const ChildEvent = BaseEvent.extend('ChildEvent', {
+ tab_id: z.string(),
+ event_result_type: z.string(),
+})
+
+const GrandchildEvent = BaseEvent.extend('GrandchildEvent', {
+ status: z.string(),
+ event_result_type: z.string(),
+})
+
+const delay = (ms: number): Promise<void> =>
+ new Promise((resolve) => {
+ setTimeout(resolve, ms)
+ })
+
+async function main(): Promise<void> {
+ const bus_a = new EventBus('BusA')
+ const bus_b = new EventBus('BusB')
+
+  async function forward_to_bus_b(event: InstanceType<typeof BaseEvent>): Promise<string> {
+ await delay(20)
+ bus_b.emit(event)
+ return 'forwarded_to_bus_b'
+ }
+
+ bus_a.on('*', forward_to_bus_b)
+
+  async function root_fast_handler(event: InstanceType<typeof RootEvent>): Promise<string> {
+ await delay(10)
+ const child = event.bus?.emit(ChildEvent({ tab_id: 'tab-123', event_timeout: 0.1 }))
+ if (child) {
+ await child.done()
+ }
+ return 'root_fast_handler_ok'
+ }
+
+  async function root_slow_handler(event: InstanceType<typeof RootEvent>): Promise<string> {
+ event.bus?.emit(ChildEvent({ tab_id: 'tab-timeout', event_timeout: 0.1 }))
+ await delay(400)
+ return 'root_slow_handler_timeout'
+ }
+
+ bus_a.on(RootEvent, root_fast_handler)
+ bus_a.on(RootEvent, root_slow_handler)
+
+  async function child_slow_handler(_event: InstanceType<typeof ChildEvent>): Promise<string> {
+ await delay(150)
+ return 'child_slow_handler_done'
+ }
+
+  async function child_fast_handler(event: InstanceType<typeof ChildEvent>): Promise<string> {
+ await delay(10)
+ const grandchild = event.bus?.emit(GrandchildEvent({ status: 'ok', event_timeout: 0.05 }))
+ if (grandchild) {
+ await grandchild.done()
+ }
+ return 'child_handler_ok'
+ }
+
+  async function grandchild_fast_handler(): Promise<string> {
+ await delay(5)
+ return 'grandchild_fast_handler_ok'
+ }
+
+  async function grandchild_slow_handler(): Promise<string> {
+ await delay(60)
+ return 'grandchild_slow_handler_timeout'
+ }
+
+ bus_b.on(ChildEvent, child_slow_handler)
+ bus_b.on(ChildEvent, child_fast_handler)
+ bus_b.on(GrandchildEvent, grandchild_fast_handler)
+ bus_b.on(GrandchildEvent, grandchild_slow_handler)
+
+ const root_event = bus_a.emit(RootEvent({ url: 'https://example.com', event_timeout: 0.25 }))
+
+ await root_event.done()
+
+ console.log('\n=== BusA logTree ===')
+ console.log(bus_a.logTree())
+
+ console.log('\n=== BusB logTree ===')
+ console.log(bus_b.logTree())
+}
+
+await main()
diff --git a/bubus-ts/examples/parent_child_tracking.ts b/bubus-ts/examples/parent_child_tracking.ts
new file mode 100755
index 0000000..6d8d7f8
--- /dev/null
+++ b/bubus-ts/examples/parent_child_tracking.ts
@@ -0,0 +1,130 @@
+#!/usr/bin/env -S node --import tsx
+// Run: node --import tsx examples/parent_child_tracking.ts
+
+import { z } from 'zod'
+
+import { BaseEvent, EventBus } from '../src/index.js'
+
+// Step 1: Define a tiny parent -> child -> grandchild event model.
+const ParentEvent = BaseEvent.extend('ParentEvent', {
+ workflow: z.string(),
+})
+
+const ChildEvent = BaseEvent.extend('ChildEvent', {
+ stage: z.string(),
+})
+
+const GrandchildEvent = BaseEvent.extend('GrandchildEvent', {
+ note: z.string(),
+})
+
+const shortId = (id?: string): string => (id ? id.slice(-8) : 'none')
+
+async function main(): Promise<void> {
+ // Step 2: Create one bus so parent/child linkage is easy to inspect in one history.
+ const bus = new EventBus('ParentChildTrackingBus')
+
+ // Step 3: Child handler dispatches a grandchild through event.bus.
+ // Because this runs inside ChildEvent handling, grandchild gets linked automatically.
+  bus.on(ChildEvent, async (event: InstanceType<typeof ChildEvent>): Promise<string> => {
+ console.log(`child handler start: ${event.event_type}#${shortId(event.event_id)}`)
+
+ const grandchild = event.bus?.emit(
+ GrandchildEvent({
+ note: `spawned by ${event.stage}`,
+ })
+ )
+
+ if (grandchild) {
+ console.log(
+ ` child dispatched grandchild: ${grandchild.event_type}#${shortId(grandchild.event_id)} parent_id=${shortId(grandchild.event_parent_id)}`
+ )
+
+ // Step 4: Await a nested event so ordering and linkage are explicit in output.
+ await grandchild.done()
+ console.log(` child resumed after grandchild.done(): ${shortId(grandchild.event_id)}`)
+ }
+
+ return `child_completed:${event.stage}`
+ })
+
+ // Step 5: Grandchild handler is simple; it just marks completion with a string result.
+  bus.on(GrandchildEvent, async (event: InstanceType<typeof GrandchildEvent>): Promise<string> => {
+ console.log(`grandchild handler: ${event.event_type}#${shortId(event.event_id)} note="${event.note}"`)
+ return `grandchild_completed:${event.note}`
+ })
+
+ // Step 6: Parent handler emits/dispatches child events via event.bus.
+ // One child is awaited with .done() to clearly show queue-jump + linkage behavior.
+  bus.on(ParentEvent, async (event: InstanceType<typeof ParentEvent>): Promise<string> => {
+ console.log(`parent handler start: ${event.event_type}#${shortId(event.event_id)} workflow="${event.workflow}"`)
+
+ const awaitedChild = event.bus?.emit(ChildEvent({ stage: 'awaited-child' }))
+ if (awaitedChild) {
+ console.log(
+ ` parent emitted child: ${awaitedChild.event_type}#${shortId(awaitedChild.event_id)} parent_id=${shortId(awaitedChild.event_parent_id)}`
+ )
+
+ // Required by this example: await at least one child so parent/child linkage is obvious.
+ await awaitedChild.done()
+ console.log(` parent resumed after awaited child.done(): ${shortId(awaitedChild.event_id)}`)
+ }
+
+ const backgroundChild = event.bus?.emit(ChildEvent({ stage: 'background-child' }))
+ if (backgroundChild) {
+ console.log(
+ ` parent dispatched second child: ${backgroundChild.event_type}#${shortId(backgroundChild.event_id)} parent_id=${shortId(backgroundChild.event_parent_id)}`
+ )
+ }
+
+ // Parent also dispatches a GrandchildEvent type directly via event.bus.
+ // This is still automatically linked to the parent event.
+ const directGrandchild = event.bus?.emit(GrandchildEvent({ note: 'directly from parent' }))
+ if (directGrandchild) {
+ console.log(
+ ` parent dispatched grandchild type directly: ${directGrandchild.event_type}#${shortId(directGrandchild.event_id)} parent_id=${shortId(directGrandchild.event_parent_id)}`
+ )
+ await directGrandchild.done()
+ }
+
+ return 'parent_completed'
+ })
+
+ // Step 7: Dispatch parent and wait for full bus idle so history is complete.
+ const parent = bus.emit(ParentEvent({ workflow: 'demo-parent-child-tracking' }))
+ await parent.done()
+ await bus.waitUntilIdle()
+
+ // Step 8: Print IDs + relationship checks from event history.
+ console.log('\n=== Event History Relationships ===')
+ const history = Array.from(bus.event_history.values()).sort((a, b) => (a.event_created_ts ?? 0) - (b.event_created_ts ?? 0))
+
+ for (const item of history) {
+ const parentEvent = item.event_parent
+ console.log(
+ [
+ `${item.event_type}#${shortId(item.event_id)}`,
+ `parent=${parentEvent ? `${parentEvent.event_type}#${shortId(parentEvent.event_id)}` : 'none'}`,
+ `isChildOfRoot=${bus.eventIsChildOf(item, parent)}`,
+ `rootIsParentOf=${bus.eventIsParentOf(parent, item)}`,
+ ].join(' | ')
+ )
+ }
+
+ const firstChild = history.find((event) => event.event_type === 'ChildEvent')
+ const nestedGrandchild = history.find(
+ (event) => event.event_type === 'GrandchildEvent' && firstChild && event.event_parent_id === firstChild.event_id
+ )
+ if (firstChild && nestedGrandchild) {
+ console.log(
+ `grandchild->child relationship check: ${nestedGrandchild.event_type}#${shortId(nestedGrandchild.event_id)} is child of ${firstChild.event_type}#${shortId(firstChild.event_id)} = ${bus.eventIsChildOf(nestedGrandchild, firstChild)}`
+ )
+ }
+
+ // Step 9: Print the built-in tree view from event history.
+ console.log('\n=== bus.logTree() ===')
+ const tree = bus.logTree()
+ console.log(tree)
+}
+
+await main()
diff --git a/bubus-ts/examples/simple.ts b/bubus-ts/examples/simple.ts
new file mode 100755
index 0000000..d7274ea
--- /dev/null
+++ b/bubus-ts/examples/simple.ts
@@ -0,0 +1,95 @@
+#!/usr/bin/env -S node --import tsx
+// Run: node --import tsx examples/simple.ts
+
+import { BaseEvent, EventBus } from '../src/index.js'
+import { z } from 'zod'
+
+// 1) Define typed events with BaseEvent.extend(...)
+const RegisterUserEvent = BaseEvent.extend('RegisterUserEvent', {
+ email: z.string().email(),
+ plan: z.enum(['free', 'pro']),
+ // Handler return values for this event are validated against this schema.
+ event_result_type: z.object({
+ user_id: z.string(),
+ welcome_email_sent: z.boolean(),
+ }),
+})
+
+const AuditEvent = BaseEvent.extend('AuditEvent', {
+ message: z.string(),
+})
+
+async function main(): Promise<void> {
+ const bus = new EventBus('SimpleExampleBus')
+
+ // 2) Register a wildcard handler to observe every event flowing through this bus.
+ bus.on('*', (event: BaseEvent) => {
+ console.log(`[wildcard] ${event.event_type}#${event.event_id.slice(-8)}`)
+ })
+
+ // 3) Register by EventClass/factory (best type inference for payload + return type).
+ bus.on(RegisterUserEvent, async (event) => {
+ console.log(`[class handler] Creating account for ${event.email} (${event.plan})`)
+ return {
+ user_id: `user_${event.email.split('@')[0]}`,
+ welcome_email_sent: true,
+ }
+ })
+
+ // 4) Register by string event type (more dynamic, weaker compile-time checks).
+  bus.on('AuditEvent', (event: InstanceType<typeof AuditEvent>) => {
+ console.log(`[string handler] Audit log: ${event.message}`)
+ })
+
+ // 5) Intentionally return an invalid result shape.
+ // This compiles because string-based registration is best-effort, but will fail
+ // at runtime because RegisterUserEvent has event_result_type enforcement.
+ bus.on('RegisterUserEvent', () => {
+ return { user_id: 123, welcome_email_sent: 'yes' } as unknown
+ })
+
+ // Dispatch a simple event handled by a string registration.
+ await bus.emit(AuditEvent({ message: 'Starting simple bubus example' })).done()
+
+ // Dispatch the typed event; one handler returns valid data, one returns invalid data.
+ const register_event = bus.emit(
+ RegisterUserEvent({
+ email: 'ada@example.com',
+ plan: 'pro',
+ })
+ )
+ await register_event.done()
+
+ // 6) Inspect per-handler results (completed vs error) from event.event_results.
+ console.log('\nRegisterUserEvent handler outcomes:')
+ for (const result of register_event.event_results.values()) {
+ if (result.status === 'completed') {
+ console.log(`- ${result.handler_name}: completed -> ${JSON.stringify(result.result)}`)
+ continue
+ }
+ if (result.status === 'error') {
+ const message = result.error instanceof Error ? result.error.message : String(result.error)
+ console.log(`- ${result.handler_name}: error -> ${message}`)
+ console.log(` raw invalid return: ${JSON.stringify(result.raw_value)}`)
+ continue
+ }
+ console.log(`- ${result.handler_name}: ${result.status}`)
+ }
+
+ // 7) Convenience getters for aggregate inspection.
+ console.log('\nFirst valid parsed result:', register_event.event_result)
+ console.log(`Total event errors: ${register_event.event_errors.length}`)
+ for (const [index, error] of register_event.event_errors.entries()) {
+ const message = error instanceof Error ? error.message : String(error)
+ console.log(` ${index + 1}. ${message}`)
+ }
+
+ await bus.waitUntilIdle()
+ console.log('\n=== bus.logTree() ===')
+ console.log(bus.logTree())
+}
+
+main().catch((error) => {
+ console.error('Example failed:', error)
+ process.exitCode = 1
+})
diff --git a/bubus-ts/package.json b/bubus-ts/package.json
new file mode 100644
index 0000000..9709420
--- /dev/null
+++ b/bubus-ts/package.json
@@ -0,0 +1,80 @@
+{
+ "name": "bubus",
+ "version": "2.1.0",
+ "description": "Event bus library for browsers and ESM Node.js",
+ "type": "module",
+ "main": "./dist/esm/index.js",
+ "module": "./dist/esm/index.js",
+ "types": "./dist/types/index.d.ts",
+ "exports": {
+ ".": {
+ "types": "./dist/types/index.d.ts",
+ "import": "./dist/esm/index.js",
+ "default": "./dist/esm/index.js"
+ }
+ },
+ "files": [
+ "dist/esm",
+ "dist/types"
+ ],
+ "scripts": {
+ "build": "pnpm run build:esm && pnpm run build:types",
+ "build:esm": "esbuild src/index.ts --bundle --format=esm --platform=neutral --target=es2022 --sourcemap --outdir=dist/esm",
+ "build:types": "tsc -p tsconfig.json --emitDeclarationOnly",
+ "typecheck": "tsc -p tsconfig.json --noEmit",
+ "prettier": "prettier --write .",
+ "eslint": "eslint .",
+ "lint": "pnpm run prettier && pnpm run eslint && pnpm run typecheck",
+ "test": "NODE_OPTIONS='--expose-gc' node --expose-gc --test --import tsx tests/**/*.test.ts",
+ "perf": "pnpm run perf:node && pnpm run perf:bun && pnpm run perf:deno && pnpm run perf:browser",
+ "debug:node": "NODE_OPTIONS='--expose-gc' node --expose-gc --import tsx",
+ "debug:bun": "bun --expose-gc run",
+ "debug:deno": "deno run --v8-flags=--expose-gc",
+ "perf:node": "pnpm run build && pnpm run debug:node -- tests/performance.runtime.ts --scenario 50k-events && pnpm run debug:node -- tests/performance.runtime.ts --scenario 500-buses-x-100-events && pnpm run debug:node -- tests/performance.runtime.ts --scenario 1-event-x-50k-parallel-handlers && pnpm run debug:node -- tests/performance.runtime.ts --scenario 50k-one-off-handlers && pnpm run debug:node -- tests/performance.runtime.ts --scenario worst-case-forwarding-timeouts && pnpm run debug:node -- tests/performance.runtime.ts --scenario cleanup-equivalence",
+ "perf:bun": "pnpm run build && pnpm run debug:bun -- tests/performance.runtime.ts --scenario 50k-events && pnpm run debug:bun -- tests/performance.runtime.ts --scenario 500-buses-x-100-events && pnpm run debug:bun -- tests/performance.runtime.ts --scenario 1-event-x-50k-parallel-handlers && pnpm run debug:bun -- tests/performance.runtime.ts --scenario 50k-one-off-handlers && pnpm run debug:bun -- tests/performance.runtime.ts --scenario worst-case-forwarding-timeouts && pnpm run debug:bun -- tests/performance.runtime.ts --scenario cleanup-equivalence",
+ "perf:deno": "pnpm run build && pnpm run debug:deno -- tests/performance.runtime.ts --scenario 50k-events && pnpm run debug:deno -- tests/performance.runtime.ts --scenario 500-buses-x-100-events && pnpm run debug:deno -- tests/performance.runtime.ts --scenario 1-event-x-50k-parallel-handlers && pnpm run debug:deno -- tests/performance.runtime.ts --scenario 50k-one-off-handlers && pnpm run debug:deno -- tests/performance.runtime.ts --scenario worst-case-forwarding-timeouts && pnpm run debug:deno -- tests/performance.runtime.ts --scenario cleanup-equivalence",
+ "perf:browser": "pnpm run build && npx --yes --package=playwright -c 'PW_BIN=\"$(command -v playwright)\"; PW_NODE_MODULES=\"$(cd \"$(dirname \"$PW_BIN\")/..\" && pwd)\"; NODE_PATH=\"$PW_NODE_MODULES\" playwright test tests/performance.browser.spec.cjs --browser=chromium --workers=1 --reporter=line --output=/tmp/bubus-playwright-results'",
+ "release:dry-run": "pnpm publish --access public --dry-run --no-git-checks",
+ "release:check": "pnpm run typecheck && pnpm test && pnpm run build"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "MIT",
+ "packageManager": "pnpm@10.29.3",
+ "dependencies": {
+ "uuid": "^11.1.0",
+ "zod": "^4.3.6"
+ },
+ "devDependencies": {
+ "@typescript-eslint/eslint-plugin": "^8.55.0",
+ "@typescript-eslint/parser": "^8.55.0",
+ "esbuild": "^0.27.3",
+ "eslint": "^9.39.2",
+ "prettier": "^3.8.1",
+ "tsx": "^4.21.0",
+ "typescript": "^5.9.3"
+ },
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/pirate/bbus.git",
+ "directory": "bubus-ts"
+ },
+ "bugs": {
+ "url": "https://github.com/pirate/bbus/issues"
+ },
+ "homepage": "https://github.com/pirate/bbus/tree/main/bubus-ts",
+ "publishConfig": {
+ "access": "public",
+ "registry": "https://registry.npmjs.org/"
+ },
+ "pnpm": {
+ "onlyBuiltDependencies": [
+ "esbuild"
+ ]
+ },
+ "optionalDependencies": {
+ "ioredis": "^5.9.3",
+ "nats": "^2.29.3",
+ "pg": "^8.18.0"
+ }
+}
diff --git a/bubus-ts/pnpm-lock.yaml b/bubus-ts/pnpm-lock.yaml
new file mode 100644
index 0000000..e03981d
--- /dev/null
+++ b/bubus-ts/pnpm-lock.yaml
@@ -0,0 +1,1461 @@
+lockfileVersion: '9.0'
+
+settings:
+ autoInstallPeers: true
+ excludeLinksFromLockfile: false
+
+importers:
+ .:
+ dependencies:
+ uuid:
+ specifier: ^11.1.0
+ version: 11.1.0
+ zod:
+ specifier: ^4.3.6
+ version: 4.3.6
+ devDependencies:
+ '@typescript-eslint/eslint-plugin':
+ specifier: ^8.55.0
+ version: 8.55.0(@typescript-eslint/parser@8.55.0(eslint@9.39.2)(typescript@5.9.3))(eslint@9.39.2)(typescript@5.9.3)
+ '@typescript-eslint/parser':
+ specifier: ^8.55.0
+ version: 8.55.0(eslint@9.39.2)(typescript@5.9.3)
+ esbuild:
+ specifier: ^0.27.3
+ version: 0.27.3
+ eslint:
+ specifier: ^9.39.2
+ version: 9.39.2
+ prettier:
+ specifier: ^3.8.1
+ version: 3.8.1
+ tsx:
+ specifier: ^4.21.0
+ version: 4.21.0
+ typescript:
+ specifier: ^5.9.3
+ version: 5.9.3
+ optionalDependencies:
+ ioredis:
+ specifier: ^5.9.3
+ version: 5.9.3
+ nats:
+ specifier: ^2.29.3
+ version: 2.29.3
+ pg:
+ specifier: ^8.18.0
+ version: 8.18.0
+
+packages:
+ '@esbuild/aix-ppc64@0.27.3':
+ resolution: { integrity: sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg== }
+ engines: { node: '>=18' }
+ cpu: [ppc64]
+ os: [aix]
+
+ '@esbuild/android-arm64@0.27.3':
+ resolution: { integrity: sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg== }
+ engines: { node: '>=18' }
+ cpu: [arm64]
+ os: [android]
+
+ '@esbuild/android-arm@0.27.3':
+ resolution: { integrity: sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA== }
+ engines: { node: '>=18' }
+ cpu: [arm]
+ os: [android]
+
+ '@esbuild/android-x64@0.27.3':
+ resolution: { integrity: sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ== }
+ engines: { node: '>=18' }
+ cpu: [x64]
+ os: [android]
+
+ '@esbuild/darwin-arm64@0.27.3':
+ resolution: { integrity: sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg== }
+ engines: { node: '>=18' }
+ cpu: [arm64]
+ os: [darwin]
+
+ '@esbuild/darwin-x64@0.27.3':
+ resolution: { integrity: sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg== }
+ engines: { node: '>=18' }
+ cpu: [x64]
+ os: [darwin]
+
+ '@esbuild/freebsd-arm64@0.27.3':
+ resolution: { integrity: sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w== }
+ engines: { node: '>=18' }
+ cpu: [arm64]
+ os: [freebsd]
+
+ '@esbuild/freebsd-x64@0.27.3':
+ resolution: { integrity: sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA== }
+ engines: { node: '>=18' }
+ cpu: [x64]
+ os: [freebsd]
+
+ '@esbuild/linux-arm64@0.27.3':
+ resolution: { integrity: sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg== }
+ engines: { node: '>=18' }
+ cpu: [arm64]
+ os: [linux]
+
+ '@esbuild/linux-arm@0.27.3':
+ resolution: { integrity: sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw== }
+ engines: { node: '>=18' }
+ cpu: [arm]
+ os: [linux]
+
+ '@esbuild/linux-ia32@0.27.3':
+ resolution: { integrity: sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg== }
+ engines: { node: '>=18' }
+ cpu: [ia32]
+ os: [linux]
+
+ '@esbuild/linux-loong64@0.27.3':
+ resolution: { integrity: sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA== }
+ engines: { node: '>=18' }
+ cpu: [loong64]
+ os: [linux]
+
+ '@esbuild/linux-mips64el@0.27.3':
+ resolution: { integrity: sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw== }
+ engines: { node: '>=18' }
+ cpu: [mips64el]
+ os: [linux]
+
+ '@esbuild/linux-ppc64@0.27.3':
+ resolution: { integrity: sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA== }
+ engines: { node: '>=18' }
+ cpu: [ppc64]
+ os: [linux]
+
+ '@esbuild/linux-riscv64@0.27.3':
+ resolution: { integrity: sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ== }
+ engines: { node: '>=18' }
+ cpu: [riscv64]
+ os: [linux]
+
+ '@esbuild/linux-s390x@0.27.3':
+ resolution: { integrity: sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw== }
+ engines: { node: '>=18' }
+ cpu: [s390x]
+ os: [linux]
+
+ '@esbuild/linux-x64@0.27.3':
+ resolution: { integrity: sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA== }
+ engines: { node: '>=18' }
+ cpu: [x64]
+ os: [linux]
+
+ '@esbuild/netbsd-arm64@0.27.3':
+ resolution: { integrity: sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA== }
+ engines: { node: '>=18' }
+ cpu: [arm64]
+ os: [netbsd]
+
+ '@esbuild/netbsd-x64@0.27.3':
+ resolution: { integrity: sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA== }
+ engines: { node: '>=18' }
+ cpu: [x64]
+ os: [netbsd]
+
+ '@esbuild/openbsd-arm64@0.27.3':
+ resolution: { integrity: sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw== }
+ engines: { node: '>=18' }
+ cpu: [arm64]
+ os: [openbsd]
+
+ '@esbuild/openbsd-x64@0.27.3':
+ resolution: { integrity: sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ== }
+ engines: { node: '>=18' }
+ cpu: [x64]
+ os: [openbsd]
+
+ '@esbuild/openharmony-arm64@0.27.3':
+ resolution: { integrity: sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g== }
+ engines: { node: '>=18' }
+ cpu: [arm64]
+ os: [openharmony]
+
+ '@esbuild/sunos-x64@0.27.3':
+ resolution: { integrity: sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA== }
+ engines: { node: '>=18' }
+ cpu: [x64]
+ os: [sunos]
+
+ '@esbuild/win32-arm64@0.27.3':
+ resolution: { integrity: sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA== }
+ engines: { node: '>=18' }
+ cpu: [arm64]
+ os: [win32]
+
+ '@esbuild/win32-ia32@0.27.3':
+ resolution: { integrity: sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q== }
+ engines: { node: '>=18' }
+ cpu: [ia32]
+ os: [win32]
+
+ '@esbuild/win32-x64@0.27.3':
+ resolution: { integrity: sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA== }
+ engines: { node: '>=18' }
+ cpu: [x64]
+ os: [win32]
+
+ '@eslint-community/eslint-utils@4.9.1':
+ resolution: { integrity: sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ== }
+ engines: { node: ^12.22.0 || ^14.17.0 || >=16.0.0 }
+ peerDependencies:
+ eslint: ^6.0.0 || ^7.0.0 || >=8.0.0
+
+ '@eslint-community/regexpp@4.12.2':
+ resolution: { integrity: sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew== }
+ engines: { node: ^12.0.0 || ^14.0.0 || >=16.0.0 }
+
+ '@eslint/config-array@0.21.1':
+ resolution: { integrity: sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+
+ '@eslint/config-helpers@0.4.2':
+ resolution: { integrity: sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+
+ '@eslint/core@0.17.0':
+ resolution: { integrity: sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+
+ '@eslint/eslintrc@3.3.3':
+ resolution: { integrity: sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+
+ '@eslint/js@9.39.2':
+ resolution: { integrity: sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+
+ '@eslint/object-schema@2.1.7':
+ resolution: { integrity: sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+
+ '@eslint/plugin-kit@0.4.1':
+ resolution: { integrity: sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+
+ '@humanfs/core@0.19.1':
+ resolution: { integrity: sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA== }
+ engines: { node: '>=18.18.0' }
+
+ '@humanfs/node@0.16.7':
+ resolution: { integrity: sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ== }
+ engines: { node: '>=18.18.0' }
+
+ '@humanwhocodes/module-importer@1.0.1':
+ resolution: { integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA== }
+ engines: { node: '>=12.22' }
+
+ '@humanwhocodes/retry@0.4.3':
+ resolution: { integrity: sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ== }
+ engines: { node: '>=18.18' }
+
+ '@ioredis/commands@1.5.0':
+ resolution: { integrity: sha512-eUgLqrMf8nJkZxT24JvVRrQya1vZkQh8BBeYNwGDqa5I0VUi8ACx7uFvAaLxintokpTenkK6DASvo/bvNbBGow== }
+
+ '@types/estree@1.0.8':
+ resolution: { integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w== }
+
+ '@types/json-schema@7.0.15':
+ resolution: { integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA== }
+
+ '@typescript-eslint/eslint-plugin@8.55.0':
+ resolution: { integrity: sha512-1y/MVSz0NglV1ijHC8OT49mPJ4qhPYjiK08YUQVbIOyu+5k862LKUHFkpKHWu//zmr7hDR2rhwUm6gnCGNmGBQ== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+ peerDependencies:
+ '@typescript-eslint/parser': ^8.55.0
+ eslint: ^8.57.0 || ^9.0.0
+ typescript: '>=4.8.4 <6.0.0'
+
+ '@typescript-eslint/parser@8.55.0':
+ resolution: { integrity: sha512-4z2nCSBfVIMnbuu8uinj+f0o4qOeggYJLbjpPHka3KH1om7e+H9yLKTYgksTaHcGco+NClhhY2vyO3HsMH1RGw== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+ peerDependencies:
+ eslint: ^8.57.0 || ^9.0.0
+ typescript: '>=4.8.4 <6.0.0'
+
+ '@typescript-eslint/project-service@8.55.0':
+ resolution: { integrity: sha512-zRcVVPFUYWa3kNnjaZGXSu3xkKV1zXy8M4nO/pElzQhFweb7PPtluDLQtKArEOGmjXoRjnUZ29NjOiF0eCDkcQ== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+ peerDependencies:
+ typescript: '>=4.8.4 <6.0.0'
+
+ '@typescript-eslint/scope-manager@8.55.0':
+ resolution: { integrity: sha512-fVu5Omrd3jeqeQLiB9f1YsuK/iHFOwb04bCtY4BSCLgjNbOD33ZdV6KyEqplHr+IlpgT0QTZ/iJ+wT7hvTx49Q== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+
+ '@typescript-eslint/tsconfig-utils@8.55.0':
+ resolution: { integrity: sha512-1R9cXqY7RQd7WuqSN47PK9EDpgFUK3VqdmbYrvWJZYDd0cavROGn+74ktWBlmJ13NXUQKlZ/iAEQHI/V0kKe0Q== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+ peerDependencies:
+ typescript: '>=4.8.4 <6.0.0'
+
+ '@typescript-eslint/type-utils@8.55.0':
+ resolution: { integrity: sha512-x1iH2unH4qAt6I37I2CGlsNs+B9WGxurP2uyZLRz6UJoZWDBx9cJL1xVN/FiOmHEONEg6RIufdvyT0TEYIgC5g== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+ peerDependencies:
+ eslint: ^8.57.0 || ^9.0.0
+ typescript: '>=4.8.4 <6.0.0'
+
+ '@typescript-eslint/types@8.55.0':
+ resolution: { integrity: sha512-ujT0Je8GI5BJWi+/mMoR0wxwVEQaxM+pi30xuMiJETlX80OPovb2p9E8ss87gnSVtYXtJoU9U1Cowcr6w2FE0w== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+
+ '@typescript-eslint/typescript-estree@8.55.0':
+ resolution: { integrity: sha512-EwrH67bSWdx/3aRQhCoxDaHM+CrZjotc2UCCpEDVqfCE+7OjKAGWNY2HsCSTEVvWH2clYQK8pdeLp42EVs+xQw== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+ peerDependencies:
+ typescript: '>=4.8.4 <6.0.0'
+
+ '@typescript-eslint/utils@8.55.0':
+ resolution: { integrity: sha512-BqZEsnPGdYpgyEIkDC1BadNY8oMwckftxBT+C8W0g1iKPdeqKZBtTfnvcq0nf60u7MkjFO8RBvpRGZBPw4L2ow== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+ peerDependencies:
+ eslint: ^8.57.0 || ^9.0.0
+ typescript: '>=4.8.4 <6.0.0'
+
+ '@typescript-eslint/visitor-keys@8.55.0':
+ resolution: { integrity: sha512-AxNRwEie8Nn4eFS1FzDMJWIISMGoXMb037sgCBJ3UR6o0fQTzr2tqN9WT+DkWJPhIdQCfV7T6D387566VtnCJA== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+
+ acorn-jsx@5.3.2:
+ resolution: { integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ== }
+ peerDependencies:
+ acorn: ^6.0.0 || ^7.0.0 || ^8.0.0
+
+ acorn@8.15.0:
+ resolution: { integrity: sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg== }
+ engines: { node: '>=0.4.0' }
+ hasBin: true
+
+ ajv@6.12.6:
+ resolution: { integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== }
+
+ ansi-styles@4.3.0:
+ resolution: { integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== }
+ engines: { node: '>=8' }
+
+ argparse@2.0.1:
+ resolution: { integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== }
+
+ balanced-match@1.0.2:
+ resolution: { integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== }
+
+ brace-expansion@1.1.12:
+ resolution: { integrity: sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg== }
+
+ brace-expansion@2.0.2:
+ resolution: { integrity: sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ== }
+
+ callsites@3.1.0:
+ resolution: { integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== }
+ engines: { node: '>=6' }
+
+ chalk@4.1.2:
+ resolution: { integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== }
+ engines: { node: '>=10' }
+
+ cluster-key-slot@1.1.2:
+ resolution: { integrity: sha512-RMr0FhtfXemyinomL4hrWcYJxmX6deFdCxpJzhDttxgO1+bcCnkk+9drydLVDmAMG7NE6aN/fl4F7ucU/90gAA== }
+ engines: { node: '>=0.10.0' }
+
+ color-convert@2.0.1:
+ resolution: { integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== }
+ engines: { node: '>=7.0.0' }
+
+ color-name@1.1.4:
+ resolution: { integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== }
+
+ concat-map@0.0.1:
+ resolution: { integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== }
+
+ cross-spawn@7.0.6:
+ resolution: { integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== }
+ engines: { node: '>= 8' }
+
+ debug@4.4.3:
+ resolution: { integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA== }
+ engines: { node: '>=6.0' }
+ peerDependencies:
+ supports-color: '*'
+ peerDependenciesMeta:
+ supports-color:
+ optional: true
+
+ deep-is@0.1.4:
+ resolution: { integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ== }
+
+ denque@2.1.0:
+ resolution: { integrity: sha512-HVQE3AAb/pxF8fQAoiqpvg9i3evqug3hoiwakOyZAwJm+6vZehbkYXZ0l4JxS+I3QxM97v5aaRNhj8v5oBhekw== }
+ engines: { node: '>=0.10' }
+
+ esbuild@0.27.3:
+ resolution: { integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg== }
+ engines: { node: '>=18' }
+ hasBin: true
+
+ escape-string-regexp@4.0.0:
+ resolution: { integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== }
+ engines: { node: '>=10' }
+
+ eslint-scope@8.4.0:
+ resolution: { integrity: sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+
+ eslint-visitor-keys@3.4.3:
+ resolution: { integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag== }
+ engines: { node: ^12.22.0 || ^14.17.0 || >=16.0.0 }
+
+ eslint-visitor-keys@4.2.1:
+ resolution: { integrity: sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+
+ eslint@9.39.2:
+ resolution: { integrity: sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+ hasBin: true
+ peerDependencies:
+ jiti: '*'
+ peerDependenciesMeta:
+ jiti:
+ optional: true
+
+ espree@10.4.0:
+ resolution: { integrity: sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ== }
+ engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 }
+
+ esquery@1.7.0:
+ resolution: { integrity: sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g== }
+ engines: { node: '>=0.10' }
+
+ esrecurse@4.3.0:
+ resolution: { integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag== }
+ engines: { node: '>=4.0' }
+
+ estraverse@5.3.0:
+ resolution: { integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA== }
+ engines: { node: '>=4.0' }
+
+ esutils@2.0.3:
+ resolution: { integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== }
+ engines: { node: '>=0.10.0' }
+
+ fast-deep-equal@3.1.3:
+ resolution: { integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== }
+
+ fast-json-stable-stringify@2.1.0:
+ resolution: { integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== }
+
+ fast-levenshtein@2.0.6:
+ resolution: { integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== }
+
+ fdir@6.5.0:
+ resolution: { integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg== }
+ engines: { node: '>=12.0.0' }
+ peerDependencies:
+ picomatch: ^3 || ^4
+ peerDependenciesMeta:
+ picomatch:
+ optional: true
+
+ file-entry-cache@8.0.0:
+ resolution: { integrity: sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ== }
+ engines: { node: '>=16.0.0' }
+
+ find-up@5.0.0:
+ resolution: { integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng== }
+ engines: { node: '>=10' }
+
+ flat-cache@4.0.1:
+ resolution: { integrity: sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw== }
+ engines: { node: '>=16' }
+
+ flatted@3.3.3:
+ resolution: { integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg== }
+
+ fsevents@2.3.3:
+ resolution: { integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw== }
+ engines: { node: ^8.16.0 || ^10.6.0 || >=11.0.0 }
+ os: [darwin]
+
+ get-tsconfig@4.13.6:
+ resolution: { integrity: sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw== }
+
+ glob-parent@6.0.2:
+ resolution: { integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A== }
+ engines: { node: '>=10.13.0' }
+
+ globals@14.0.0:
+ resolution: { integrity: sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ== }
+ engines: { node: '>=18' }
+
+ has-flag@4.0.0:
+ resolution: { integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== }
+ engines: { node: '>=8' }
+
+ ignore@5.3.2:
+ resolution: { integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g== }
+ engines: { node: '>= 4' }
+
+ ignore@7.0.5:
+ resolution: { integrity: sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg== }
+ engines: { node: '>= 4' }
+
+ import-fresh@3.3.1:
+ resolution: { integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ== }
+ engines: { node: '>=6' }
+
+ imurmurhash@0.1.4:
+ resolution: { integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA== }
+ engines: { node: '>=0.8.19' }
+
+ ioredis@5.9.3:
+ resolution: { integrity: sha512-VI5tMCdeoxZWU5vjHWsiE/Su76JGhBvWF1MJnV9ZtGltHk9BmD48oDq8Tj8haZ85aceXZMxLNDQZRVo5QKNgXA== }
+ engines: { node: '>=12.22.0' }
+
+ is-extglob@2.1.1:
+ resolution: { integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ== }
+ engines: { node: '>=0.10.0' }
+
+ is-glob@4.0.3:
+ resolution: { integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg== }
+ engines: { node: '>=0.10.0' }
+
+ isexe@2.0.0:
+ resolution: { integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw== }
+
+ js-yaml@4.1.1:
+ resolution: { integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== }
+ hasBin: true
+
+ json-buffer@3.0.1:
+ resolution: { integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ== }
+
+ json-schema-traverse@0.4.1:
+ resolution: { integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== }
+
+ json-stable-stringify-without-jsonify@1.0.1:
+ resolution: { integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw== }
+
+ keyv@4.5.4:
+ resolution: { integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw== }
+
+ levn@0.4.1:
+ resolution: { integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ== }
+ engines: { node: '>= 0.8.0' }
+
+ locate-path@6.0.0:
+ resolution: { integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw== }
+ engines: { node: '>=10' }
+
+ lodash.defaults@4.2.0:
+ resolution: { integrity: sha512-qjxPLHd3r5DnsdGacqOMU6pb/avJzdh9tFX2ymgoZE27BmjXrNy/y4LoaiTeAb+O3gL8AfpJGtqfX/ae2leYYQ== }
+
+ lodash.isarguments@3.1.0:
+ resolution: { integrity: sha512-chi4NHZlZqZD18a0imDHnZPrDeBbTtVN7GXMwuGdRH9qotxAjYs3aVLKc7zNOG9eddR5Ksd8rvFEBc9SsggPpg== }
+
+ lodash.merge@4.6.2:
+ resolution: { integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== }
+
+ minimatch@3.1.2:
+ resolution: { integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== }
+
+ minimatch@9.0.5:
+ resolution: { integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow== }
+ engines: { node: '>=16 || 14 >=14.17' }
+
+ ms@2.1.3:
+ resolution: { integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== }
+
+ nats@2.29.3:
+ resolution: { integrity: sha512-tOQCRCwC74DgBTk4pWZ9V45sk4d7peoE2njVprMRCBXrhJ5q5cYM7i6W+Uvw2qUrcfOSnuisrX7bEx3b3Wx4QA== }
+ engines: { node: '>= 14.0.0' }
+
+ natural-compare@1.4.0:
+ resolution: { integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw== }
+
+ nkeys.js@1.1.0:
+ resolution: { integrity: sha512-tB/a0shZL5UZWSwsoeyqfTszONTt4k2YS0tuQioMOD180+MbombYVgzDUYHlx+gejYK6rgf08n/2Df99WY0Sxg== }
+ engines: { node: '>=10.0.0' }
+
+ optionator@0.9.4:
+ resolution: { integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g== }
+ engines: { node: '>= 0.8.0' }
+
+ p-limit@3.1.0:
+ resolution: { integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== }
+ engines: { node: '>=10' }
+
+ p-locate@5.0.0:
+ resolution: { integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw== }
+ engines: { node: '>=10' }
+
+ parent-module@1.0.1:
+ resolution: { integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== }
+ engines: { node: '>=6' }
+
+ path-exists@4.0.0:
+ resolution: { integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== }
+ engines: { node: '>=8' }
+
+ path-key@3.1.1:
+ resolution: { integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== }
+ engines: { node: '>=8' }
+
+ pg-cloudflare@1.3.0:
+ resolution: { integrity: sha512-6lswVVSztmHiRtD6I8hw4qP/nDm1EJbKMRhf3HCYaqud7frGysPv7FYJ5noZQdhQtN2xJnimfMtvQq21pdbzyQ== }
+
+ pg-connection-string@2.11.0:
+ resolution: { integrity: sha512-kecgoJwhOpxYU21rZjULrmrBJ698U2RxXofKVzOn5UDj61BPj/qMb7diYUR1nLScCDbrztQFl1TaQZT0t1EtzQ== }
+
+ pg-int8@1.0.1:
+ resolution: { integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw== }
+ engines: { node: '>=4.0.0' }
+
+ pg-pool@3.11.0:
+ resolution: { integrity: sha512-MJYfvHwtGp870aeusDh+hg9apvOe2zmpZJpyt+BMtzUWlVqbhFmMK6bOBXLBUPd7iRtIF9fZplDc7KrPN3PN7w== }
+ peerDependencies:
+ pg: '>=8.0'
+
+ pg-protocol@1.11.0:
+ resolution: { integrity: sha512-pfsxk2M9M3BuGgDOfuy37VNRRX3jmKgMjcvAcWqNDpZSf4cUmv8HSOl5ViRQFsfARFn0KuUQTgLxVMbNq5NW3g== }
+
+ pg-types@2.2.0:
+ resolution: { integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA== }
+ engines: { node: '>=4' }
+
+ pg@8.18.0:
+ resolution: { integrity: sha512-xqrUDL1b9MbkydY/s+VZ6v+xiMUmOUk7SS9d/1kpyQxoJ6U9AO1oIJyUWVZojbfe5Cc/oluutcgFG4L9RDP1iQ== }
+ engines: { node: '>= 16.0.0' }
+ peerDependencies:
+ pg-native: '>=3.0.1'
+ peerDependenciesMeta:
+ pg-native:
+ optional: true
+
+ pgpass@1.0.5:
+ resolution: { integrity: sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug== }
+
+ picomatch@4.0.3:
+ resolution: { integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q== }
+ engines: { node: '>=12' }
+
+ postgres-array@2.0.0:
+ resolution: { integrity: sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA== }
+ engines: { node: '>=4' }
+
+ postgres-bytea@1.0.1:
+ resolution: { integrity: sha512-5+5HqXnsZPE65IJZSMkZtURARZelel2oXUEO8rH83VS/hxH5vv1uHquPg5wZs8yMAfdv971IU+kcPUczi7NVBQ== }
+ engines: { node: '>=0.10.0' }
+
+ postgres-date@1.0.7:
+ resolution: { integrity: sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q== }
+ engines: { node: '>=0.10.0' }
+
+ postgres-interval@1.2.0:
+ resolution: { integrity: sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ== }
+ engines: { node: '>=0.10.0' }
+
+ prelude-ls@1.2.1:
+ resolution: { integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g== }
+ engines: { node: '>= 0.8.0' }
+
+ prettier@3.8.1:
+ resolution: { integrity: sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg== }
+ engines: { node: '>=14' }
+ hasBin: true
+
+ punycode@2.3.1:
+ resolution: { integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== }
+ engines: { node: '>=6' }
+
+ redis-errors@1.2.0:
+ resolution: { integrity: sha512-1qny3OExCf0UvUV/5wpYKf2YwPcOqXzkwKKSmKHiE6ZMQs5heeE/c8eXK+PNllPvmjgAbfnsbpkGZWy8cBpn9w== }
+ engines: { node: '>=4' }
+
+ redis-parser@3.0.0:
+ resolution: { integrity: sha512-DJnGAeenTdpMEH6uAJRK/uiyEIH9WVsUmoLwzudwGJUwZPp80PDBWPHXSAGNPwNvIXAbe7MSUB1zQFugFml66A== }
+ engines: { node: '>=4' }
+
+ resolve-from@4.0.0:
+ resolution: { integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== }
+ engines: { node: '>=4' }
+
+ resolve-pkg-maps@1.0.0:
+ resolution: { integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw== }
+
+ semver@7.7.4:
+ resolution: { integrity: sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA== }
+ engines: { node: '>=10' }
+ hasBin: true
+
+ shebang-command@2.0.0:
+ resolution: { integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== }
+ engines: { node: '>=8' }
+
+ shebang-regex@3.0.0:
+ resolution: { integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== }
+ engines: { node: '>=8' }
+
+ split2@4.2.0:
+ resolution: { integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg== }
+ engines: { node: '>= 10.x' }
+
+ standard-as-callback@2.1.0:
+ resolution: { integrity: sha512-qoRRSyROncaz1z0mvYqIE4lCd9p2R90i6GxW3uZv5ucSu8tU7B5HXUP1gG8pVZsYNVaXjk8ClXHPttLyxAL48A== }
+
+ strip-json-comments@3.1.1:
+ resolution: { integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== }
+ engines: { node: '>=8' }
+
+ supports-color@7.2.0:
+ resolution: { integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== }
+ engines: { node: '>=8' }
+
+ tinyglobby@0.2.15:
+ resolution: { integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ== }
+ engines: { node: '>=12.0.0' }
+
+ ts-api-utils@2.4.0:
+ resolution: { integrity: sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA== }
+ engines: { node: '>=18.12' }
+ peerDependencies:
+ typescript: '>=4.8.4'
+
+ tsx@4.21.0:
+ resolution: { integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw== }
+ engines: { node: '>=18.0.0' }
+ hasBin: true
+
+ tweetnacl@1.0.3:
+ resolution: { integrity: sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw== }
+
+ type-check@0.4.0:
+ resolution: { integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew== }
+ engines: { node: '>= 0.8.0' }
+
+ typescript@5.9.3:
+ resolution: { integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw== }
+ engines: { node: '>=14.17' }
+ hasBin: true
+
+ uri-js@4.4.1:
+ resolution: { integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== }
+
+ uuid@11.1.0:
+ resolution: { integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A== }
+ hasBin: true
+
+ which@2.0.2:
+ resolution: { integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== }
+ engines: { node: '>= 8' }
+ hasBin: true
+
+ word-wrap@1.2.5:
+ resolution: { integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA== }
+ engines: { node: '>=0.10.0' }
+
+ xtend@4.0.2:
+ resolution: { integrity: sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== }
+ engines: { node: '>=0.4' }
+
+ yocto-queue@0.1.0:
+ resolution: { integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== }
+ engines: { node: '>=10' }
+
+ zod@4.3.6:
+ resolution: { integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg== }
+
+snapshots:
+ '@esbuild/aix-ppc64@0.27.3':
+ optional: true
+
+ '@esbuild/android-arm64@0.27.3':
+ optional: true
+
+ '@esbuild/android-arm@0.27.3':
+ optional: true
+
+ '@esbuild/android-x64@0.27.3':
+ optional: true
+
+ '@esbuild/darwin-arm64@0.27.3':
+ optional: true
+
+ '@esbuild/darwin-x64@0.27.3':
+ optional: true
+
+ '@esbuild/freebsd-arm64@0.27.3':
+ optional: true
+
+ '@esbuild/freebsd-x64@0.27.3':
+ optional: true
+
+ '@esbuild/linux-arm64@0.27.3':
+ optional: true
+
+ '@esbuild/linux-arm@0.27.3':
+ optional: true
+
+ '@esbuild/linux-ia32@0.27.3':
+ optional: true
+
+ '@esbuild/linux-loong64@0.27.3':
+ optional: true
+
+ '@esbuild/linux-mips64el@0.27.3':
+ optional: true
+
+ '@esbuild/linux-ppc64@0.27.3':
+ optional: true
+
+ '@esbuild/linux-riscv64@0.27.3':
+ optional: true
+
+ '@esbuild/linux-s390x@0.27.3':
+ optional: true
+
+ '@esbuild/linux-x64@0.27.3':
+ optional: true
+
+ '@esbuild/netbsd-arm64@0.27.3':
+ optional: true
+
+ '@esbuild/netbsd-x64@0.27.3':
+ optional: true
+
+ '@esbuild/openbsd-arm64@0.27.3':
+ optional: true
+
+ '@esbuild/openbsd-x64@0.27.3':
+ optional: true
+
+ '@esbuild/openharmony-arm64@0.27.3':
+ optional: true
+
+ '@esbuild/sunos-x64@0.27.3':
+ optional: true
+
+ '@esbuild/win32-arm64@0.27.3':
+ optional: true
+
+ '@esbuild/win32-ia32@0.27.3':
+ optional: true
+
+ '@esbuild/win32-x64@0.27.3':
+ optional: true
+
+ '@eslint-community/eslint-utils@4.9.1(eslint@9.39.2)':
+ dependencies:
+ eslint: 9.39.2
+ eslint-visitor-keys: 3.4.3
+
+ '@eslint-community/regexpp@4.12.2': {}
+
+ '@eslint/config-array@0.21.1':
+ dependencies:
+ '@eslint/object-schema': 2.1.7
+ debug: 4.4.3
+ minimatch: 3.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ '@eslint/config-helpers@0.4.2':
+ dependencies:
+ '@eslint/core': 0.17.0
+
+ '@eslint/core@0.17.0':
+ dependencies:
+ '@types/json-schema': 7.0.15
+
+ '@eslint/eslintrc@3.3.3':
+ dependencies:
+ ajv: 6.12.6
+ debug: 4.4.3
+ espree: 10.4.0
+ globals: 14.0.0
+ ignore: 5.3.2
+ import-fresh: 3.3.1
+ js-yaml: 4.1.1
+ minimatch: 3.1.2
+ strip-json-comments: 3.1.1
+ transitivePeerDependencies:
+ - supports-color
+
+ '@eslint/js@9.39.2': {}
+
+ '@eslint/object-schema@2.1.7': {}
+
+ '@eslint/plugin-kit@0.4.1':
+ dependencies:
+ '@eslint/core': 0.17.0
+ levn: 0.4.1
+
+ '@humanfs/core@0.19.1': {}
+
+ '@humanfs/node@0.16.7':
+ dependencies:
+ '@humanfs/core': 0.19.1
+ '@humanwhocodes/retry': 0.4.3
+
+ '@humanwhocodes/module-importer@1.0.1': {}
+
+ '@humanwhocodes/retry@0.4.3': {}
+
+ '@ioredis/commands@1.5.0':
+ optional: true
+
+ '@types/estree@1.0.8': {}
+
+ '@types/json-schema@7.0.15': {}
+
+ '@typescript-eslint/eslint-plugin@8.55.0(@typescript-eslint/parser@8.55.0(eslint@9.39.2)(typescript@5.9.3))(eslint@9.39.2)(typescript@5.9.3)':
+ dependencies:
+ '@eslint-community/regexpp': 4.12.2
+ '@typescript-eslint/parser': 8.55.0(eslint@9.39.2)(typescript@5.9.3)
+ '@typescript-eslint/scope-manager': 8.55.0
+ '@typescript-eslint/type-utils': 8.55.0(eslint@9.39.2)(typescript@5.9.3)
+ '@typescript-eslint/utils': 8.55.0(eslint@9.39.2)(typescript@5.9.3)
+ '@typescript-eslint/visitor-keys': 8.55.0
+ eslint: 9.39.2
+ ignore: 7.0.5
+ natural-compare: 1.4.0
+ ts-api-utils: 2.4.0(typescript@5.9.3)
+ typescript: 5.9.3
+ transitivePeerDependencies:
+ - supports-color
+
+ '@typescript-eslint/parser@8.55.0(eslint@9.39.2)(typescript@5.9.3)':
+ dependencies:
+ '@typescript-eslint/scope-manager': 8.55.0
+ '@typescript-eslint/types': 8.55.0
+ '@typescript-eslint/typescript-estree': 8.55.0(typescript@5.9.3)
+ '@typescript-eslint/visitor-keys': 8.55.0
+ debug: 4.4.3
+ eslint: 9.39.2
+ typescript: 5.9.3
+ transitivePeerDependencies:
+ - supports-color
+
+ '@typescript-eslint/project-service@8.55.0(typescript@5.9.3)':
+ dependencies:
+ '@typescript-eslint/tsconfig-utils': 8.55.0(typescript@5.9.3)
+ '@typescript-eslint/types': 8.55.0
+ debug: 4.4.3
+ typescript: 5.9.3
+ transitivePeerDependencies:
+ - supports-color
+
+ '@typescript-eslint/scope-manager@8.55.0':
+ dependencies:
+ '@typescript-eslint/types': 8.55.0
+ '@typescript-eslint/visitor-keys': 8.55.0
+
+ '@typescript-eslint/tsconfig-utils@8.55.0(typescript@5.9.3)':
+ dependencies:
+ typescript: 5.9.3
+
+ '@typescript-eslint/type-utils@8.55.0(eslint@9.39.2)(typescript@5.9.3)':
+ dependencies:
+ '@typescript-eslint/types': 8.55.0
+ '@typescript-eslint/typescript-estree': 8.55.0(typescript@5.9.3)
+ '@typescript-eslint/utils': 8.55.0(eslint@9.39.2)(typescript@5.9.3)
+ debug: 4.4.3
+ eslint: 9.39.2
+ ts-api-utils: 2.4.0(typescript@5.9.3)
+ typescript: 5.9.3
+ transitivePeerDependencies:
+ - supports-color
+
+ '@typescript-eslint/types@8.55.0': {}
+
+ '@typescript-eslint/typescript-estree@8.55.0(typescript@5.9.3)':
+ dependencies:
+ '@typescript-eslint/project-service': 8.55.0(typescript@5.9.3)
+ '@typescript-eslint/tsconfig-utils': 8.55.0(typescript@5.9.3)
+ '@typescript-eslint/types': 8.55.0
+ '@typescript-eslint/visitor-keys': 8.55.0
+ debug: 4.4.3
+ minimatch: 9.0.5
+ semver: 7.7.4
+ tinyglobby: 0.2.15
+ ts-api-utils: 2.4.0(typescript@5.9.3)
+ typescript: 5.9.3
+ transitivePeerDependencies:
+ - supports-color
+
+ '@typescript-eslint/utils@8.55.0(eslint@9.39.2)(typescript@5.9.3)':
+ dependencies:
+ '@eslint-community/eslint-utils': 4.9.1(eslint@9.39.2)
+ '@typescript-eslint/scope-manager': 8.55.0
+ '@typescript-eslint/types': 8.55.0
+ '@typescript-eslint/typescript-estree': 8.55.0(typescript@5.9.3)
+ eslint: 9.39.2
+ typescript: 5.9.3
+ transitivePeerDependencies:
+ - supports-color
+
+ '@typescript-eslint/visitor-keys@8.55.0':
+ dependencies:
+ '@typescript-eslint/types': 8.55.0
+ eslint-visitor-keys: 4.2.1
+
+ acorn-jsx@5.3.2(acorn@8.15.0):
+ dependencies:
+ acorn: 8.15.0
+
+ acorn@8.15.0: {}
+
+ ajv@6.12.6:
+ dependencies:
+ fast-deep-equal: 3.1.3
+ fast-json-stable-stringify: 2.1.0
+ json-schema-traverse: 0.4.1
+ uri-js: 4.4.1
+
+ ansi-styles@4.3.0:
+ dependencies:
+ color-convert: 2.0.1
+
+ argparse@2.0.1: {}
+
+ balanced-match@1.0.2: {}
+
+ brace-expansion@1.1.12:
+ dependencies:
+ balanced-match: 1.0.2
+ concat-map: 0.0.1
+
+ brace-expansion@2.0.2:
+ dependencies:
+ balanced-match: 1.0.2
+
+ callsites@3.1.0: {}
+
+ chalk@4.1.2:
+ dependencies:
+ ansi-styles: 4.3.0
+ supports-color: 7.2.0
+
+ cluster-key-slot@1.1.2:
+ optional: true
+
+ color-convert@2.0.1:
+ dependencies:
+ color-name: 1.1.4
+
+ color-name@1.1.4: {}
+
+ concat-map@0.0.1: {}
+
+ cross-spawn@7.0.6:
+ dependencies:
+ path-key: 3.1.1
+ shebang-command: 2.0.0
+ which: 2.0.2
+
+ debug@4.4.3:
+ dependencies:
+ ms: 2.1.3
+
+ deep-is@0.1.4: {}
+
+ denque@2.1.0:
+ optional: true
+
+ esbuild@0.27.3:
+ optionalDependencies:
+ '@esbuild/aix-ppc64': 0.27.3
+ '@esbuild/android-arm': 0.27.3
+ '@esbuild/android-arm64': 0.27.3
+ '@esbuild/android-x64': 0.27.3
+ '@esbuild/darwin-arm64': 0.27.3
+ '@esbuild/darwin-x64': 0.27.3
+ '@esbuild/freebsd-arm64': 0.27.3
+ '@esbuild/freebsd-x64': 0.27.3
+ '@esbuild/linux-arm': 0.27.3
+ '@esbuild/linux-arm64': 0.27.3
+ '@esbuild/linux-ia32': 0.27.3
+ '@esbuild/linux-loong64': 0.27.3
+ '@esbuild/linux-mips64el': 0.27.3
+ '@esbuild/linux-ppc64': 0.27.3
+ '@esbuild/linux-riscv64': 0.27.3
+ '@esbuild/linux-s390x': 0.27.3
+ '@esbuild/linux-x64': 0.27.3
+ '@esbuild/netbsd-arm64': 0.27.3
+ '@esbuild/netbsd-x64': 0.27.3
+ '@esbuild/openbsd-arm64': 0.27.3
+ '@esbuild/openbsd-x64': 0.27.3
+ '@esbuild/openharmony-arm64': 0.27.3
+ '@esbuild/sunos-x64': 0.27.3
+ '@esbuild/win32-arm64': 0.27.3
+ '@esbuild/win32-ia32': 0.27.3
+ '@esbuild/win32-x64': 0.27.3
+
+ escape-string-regexp@4.0.0: {}
+
+ eslint-scope@8.4.0:
+ dependencies:
+ esrecurse: 4.3.0
+ estraverse: 5.3.0
+
+ eslint-visitor-keys@3.4.3: {}
+
+ eslint-visitor-keys@4.2.1: {}
+
+ eslint@9.39.2:
+ dependencies:
+ '@eslint-community/eslint-utils': 4.9.1(eslint@9.39.2)
+ '@eslint-community/regexpp': 4.12.2
+ '@eslint/config-array': 0.21.1
+ '@eslint/config-helpers': 0.4.2
+ '@eslint/core': 0.17.0
+ '@eslint/eslintrc': 3.3.3
+ '@eslint/js': 9.39.2
+ '@eslint/plugin-kit': 0.4.1
+ '@humanfs/node': 0.16.7
+ '@humanwhocodes/module-importer': 1.0.1
+ '@humanwhocodes/retry': 0.4.3
+ '@types/estree': 1.0.8
+ ajv: 6.12.6
+ chalk: 4.1.2
+ cross-spawn: 7.0.6
+ debug: 4.4.3
+ escape-string-regexp: 4.0.0
+ eslint-scope: 8.4.0
+ eslint-visitor-keys: 4.2.1
+ espree: 10.4.0
+ esquery: 1.7.0
+ esutils: 2.0.3
+ fast-deep-equal: 3.1.3
+ file-entry-cache: 8.0.0
+ find-up: 5.0.0
+ glob-parent: 6.0.2
+ ignore: 5.3.2
+ imurmurhash: 0.1.4
+ is-glob: 4.0.3
+ json-stable-stringify-without-jsonify: 1.0.1
+ lodash.merge: 4.6.2
+ minimatch: 3.1.2
+ natural-compare: 1.4.0
+ optionator: 0.9.4
+ transitivePeerDependencies:
+ - supports-color
+
+ espree@10.4.0:
+ dependencies:
+ acorn: 8.15.0
+ acorn-jsx: 5.3.2(acorn@8.15.0)
+ eslint-visitor-keys: 4.2.1
+
+ esquery@1.7.0:
+ dependencies:
+ estraverse: 5.3.0
+
+ esrecurse@4.3.0:
+ dependencies:
+ estraverse: 5.3.0
+
+ estraverse@5.3.0: {}
+
+ esutils@2.0.3: {}
+
+ fast-deep-equal@3.1.3: {}
+
+ fast-json-stable-stringify@2.1.0: {}
+
+ fast-levenshtein@2.0.6: {}
+
+ fdir@6.5.0(picomatch@4.0.3):
+ optionalDependencies:
+ picomatch: 4.0.3
+
+ file-entry-cache@8.0.0:
+ dependencies:
+ flat-cache: 4.0.1
+
+ find-up@5.0.0:
+ dependencies:
+ locate-path: 6.0.0
+ path-exists: 4.0.0
+
+ flat-cache@4.0.1:
+ dependencies:
+ flatted: 3.3.3
+ keyv: 4.5.4
+
+ flatted@3.3.3: {}
+
+ fsevents@2.3.3:
+ optional: true
+
+ get-tsconfig@4.13.6:
+ dependencies:
+ resolve-pkg-maps: 1.0.0
+
+ glob-parent@6.0.2:
+ dependencies:
+ is-glob: 4.0.3
+
+ globals@14.0.0: {}
+
+ has-flag@4.0.0: {}
+
+ ignore@5.3.2: {}
+
+ ignore@7.0.5: {}
+
+ import-fresh@3.3.1:
+ dependencies:
+ parent-module: 1.0.1
+ resolve-from: 4.0.0
+
+ imurmurhash@0.1.4: {}
+
+ ioredis@5.9.3:
+ dependencies:
+ '@ioredis/commands': 1.5.0
+ cluster-key-slot: 1.1.2
+ debug: 4.4.3
+ denque: 2.1.0
+ lodash.defaults: 4.2.0
+ lodash.isarguments: 3.1.0
+ redis-errors: 1.2.0
+ redis-parser: 3.0.0
+ standard-as-callback: 2.1.0
+ transitivePeerDependencies:
+ - supports-color
+ optional: true
+
+ is-extglob@2.1.1: {}
+
+ is-glob@4.0.3:
+ dependencies:
+ is-extglob: 2.1.1
+
+ isexe@2.0.0: {}
+
+ js-yaml@4.1.1:
+ dependencies:
+ argparse: 2.0.1
+
+ json-buffer@3.0.1: {}
+
+ json-schema-traverse@0.4.1: {}
+
+ json-stable-stringify-without-jsonify@1.0.1: {}
+
+ keyv@4.5.4:
+ dependencies:
+ json-buffer: 3.0.1
+
+ levn@0.4.1:
+ dependencies:
+ prelude-ls: 1.2.1
+ type-check: 0.4.0
+
+ locate-path@6.0.0:
+ dependencies:
+ p-locate: 5.0.0
+
+ lodash.defaults@4.2.0:
+ optional: true
+
+ lodash.isarguments@3.1.0:
+ optional: true
+
+ lodash.merge@4.6.2: {}
+
+ minimatch@3.1.2:
+ dependencies:
+ brace-expansion: 1.1.12
+
+ minimatch@9.0.5:
+ dependencies:
+ brace-expansion: 2.0.2
+
+ ms@2.1.3: {}
+
+ nats@2.29.3:
+ dependencies:
+ nkeys.js: 1.1.0
+ optional: true
+
+ natural-compare@1.4.0: {}
+
+ nkeys.js@1.1.0:
+ dependencies:
+ tweetnacl: 1.0.3
+ optional: true
+
+ optionator@0.9.4:
+ dependencies:
+ deep-is: 0.1.4
+ fast-levenshtein: 2.0.6
+ levn: 0.4.1
+ prelude-ls: 1.2.1
+ type-check: 0.4.0
+ word-wrap: 1.2.5
+
+ p-limit@3.1.0:
+ dependencies:
+ yocto-queue: 0.1.0
+
+ p-locate@5.0.0:
+ dependencies:
+ p-limit: 3.1.0
+
+ parent-module@1.0.1:
+ dependencies:
+ callsites: 3.1.0
+
+ path-exists@4.0.0: {}
+
+ path-key@3.1.1: {}
+
+ pg-cloudflare@1.3.0:
+ optional: true
+
+ pg-connection-string@2.11.0:
+ optional: true
+
+ pg-int8@1.0.1:
+ optional: true
+
+ pg-pool@3.11.0(pg@8.18.0):
+ dependencies:
+ pg: 8.18.0
+ optional: true
+
+ pg-protocol@1.11.0:
+ optional: true
+
+ pg-types@2.2.0:
+ dependencies:
+ pg-int8: 1.0.1
+ postgres-array: 2.0.0
+ postgres-bytea: 1.0.1
+ postgres-date: 1.0.7
+ postgres-interval: 1.2.0
+ optional: true
+
+ pg@8.18.0:
+ dependencies:
+ pg-connection-string: 2.11.0
+ pg-pool: 3.11.0(pg@8.18.0)
+ pg-protocol: 1.11.0
+ pg-types: 2.2.0
+ pgpass: 1.0.5
+ optionalDependencies:
+ pg-cloudflare: 1.3.0
+ optional: true
+
+ pgpass@1.0.5:
+ dependencies:
+ split2: 4.2.0
+ optional: true
+
+ picomatch@4.0.3: {}
+
+ postgres-array@2.0.0:
+ optional: true
+
+ postgres-bytea@1.0.1:
+ optional: true
+
+ postgres-date@1.0.7:
+ optional: true
+
+ postgres-interval@1.2.0:
+ dependencies:
+ xtend: 4.0.2
+ optional: true
+
+ prelude-ls@1.2.1: {}
+
+ prettier@3.8.1: {}
+
+ punycode@2.3.1: {}
+
+ redis-errors@1.2.0:
+ optional: true
+
+ redis-parser@3.0.0:
+ dependencies:
+ redis-errors: 1.2.0
+ optional: true
+
+ resolve-from@4.0.0: {}
+
+ resolve-pkg-maps@1.0.0: {}
+
+ semver@7.7.4: {}
+
+ shebang-command@2.0.0:
+ dependencies:
+ shebang-regex: 3.0.0
+
+ shebang-regex@3.0.0: {}
+
+ split2@4.2.0:
+ optional: true
+
+ standard-as-callback@2.1.0:
+ optional: true
+
+ strip-json-comments@3.1.1: {}
+
+ supports-color@7.2.0:
+ dependencies:
+ has-flag: 4.0.0
+
+ tinyglobby@0.2.15:
+ dependencies:
+ fdir: 6.5.0(picomatch@4.0.3)
+ picomatch: 4.0.3
+
+ ts-api-utils@2.4.0(typescript@5.9.3):
+ dependencies:
+ typescript: 5.9.3
+
+ tsx@4.21.0:
+ dependencies:
+ esbuild: 0.27.3
+ get-tsconfig: 4.13.6
+ optionalDependencies:
+ fsevents: 2.3.3
+
+ tweetnacl@1.0.3:
+ optional: true
+
+ type-check@0.4.0:
+ dependencies:
+ prelude-ls: 1.2.1
+
+ typescript@5.9.3: {}
+
+ uri-js@4.4.1:
+ dependencies:
+ punycode: 2.3.1
+
+ uuid@11.1.0: {}
+
+ which@2.0.2:
+ dependencies:
+ isexe: 2.0.0
+
+ word-wrap@1.2.5: {}
+
+ xtend@4.0.2:
+ optional: true
+
+ yocto-queue@0.1.0: {}
+
+ zod@4.3.6: {}
diff --git a/bubus-ts/pnpm-workspace.yaml b/bubus-ts/pnpm-workspace.yaml
new file mode 100644
index 0000000..e4a4b5b
--- /dev/null
+++ b/bubus-ts/pnpm-workspace.yaml
@@ -0,0 +1,2 @@
+onlyBuiltDependencies:
+ - better-sqlite3
diff --git a/bubus-ts/prettier.config.js b/bubus-ts/prettier.config.js
new file mode 100644
index 0000000..98b89f5
--- /dev/null
+++ b/bubus-ts/prettier.config.js
@@ -0,0 +1,8 @@
+const config = {
+ semi: false,
+ singleQuote: true,
+ trailingComma: 'es5',
+ printWidth: 140,
+}
+
+export default config
diff --git a/bubus-ts/src/async_context.ts b/bubus-ts/src/async_context.ts
new file mode 100644
index 0000000..c2ed50a
--- /dev/null
+++ b/bubus-ts/src/async_context.ts
@@ -0,0 +1,53 @@
+declare const process: { versions?: { node?: string } } | undefined
+
+type AsyncLocalStorageLike = {
+ getStore(): unknown
+ run<T>(store: unknown, callback: () => T): T
+ enterWith?(store: unknown): void
+}
+
+export type { AsyncLocalStorageLike }
+
+// Cache the AsyncLocalStorage constructor so multiple modules can create separate instances.
+let _AsyncLocalStorageClass: (new () => AsyncLocalStorageLike) | null = null
+
+const is_node = typeof process !== 'undefined' && typeof process.versions !== 'undefined' && typeof process.versions.node === 'string'
+
+if (is_node) {
+ try {
+ const importer = new Function('specifier', 'return import(specifier)') as (
+ specifier: string
+ ) => Promise<{ AsyncLocalStorage?: new () => AsyncLocalStorageLike }>
+ const mod = await importer('node:async_hooks')
+ if (mod?.AsyncLocalStorage) {
+ _AsyncLocalStorageClass = mod.AsyncLocalStorage
+ }
+ } catch {
+ _AsyncLocalStorageClass = null
+ }
+}
+
+/** Create a new AsyncLocalStorage instance, or null if unavailable (e.g. in browsers). */
+export const createAsyncLocalStorage = (): AsyncLocalStorageLike | null => {
+ if (!_AsyncLocalStorageClass) return null
+ return new _AsyncLocalStorageClass()
+}
+
+// The primary AsyncLocalStorage instance used for event dispatch context propagation.
+export let async_local_storage: AsyncLocalStorageLike | null = _AsyncLocalStorageClass ? new _AsyncLocalStorageClass() : null
+
+export const captureAsyncContext = (): unknown | null => {
+ if (!async_local_storage) {
+ return null
+ }
+ return async_local_storage.getStore() ?? null
+}
+
+export const runWithAsyncContext = <T>(context: unknown | null, fn: () => T): T => {
+ if (!async_local_storage) {
+ return fn()
+ }
+ return async_local_storage.run(context ?? undefined, fn)
+}
+
+export const hasAsyncLocalStorage = (): boolean => async_local_storage !== null
diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts
new file mode 100644
index 0000000..cf57dac
--- /dev/null
+++ b/bubus-ts/src/base_event.ts
@@ -0,0 +1,899 @@
+import { z } from 'zod'
+import { v7 as uuidv7 } from 'uuid'
+
+import type { EventBus } from './event_bus.js'
+import type { EventHandler } from './event_handler.js'
+import { EventResult } from './event_result.js'
+import { EventHandlerAbortedError, EventHandlerCancelledError, EventHandlerTimeoutError } from './event_handler.js'
+import type { EventConcurrencyMode, EventHandlerConcurrencyMode, EventHandlerCompletionMode, Deferred } from './lock_manager.js'
+import {
+ AsyncLock,
+ EVENT_CONCURRENCY_MODES,
+ EVENT_HANDLER_CONCURRENCY_MODES,
+ EVENT_HANDLER_COMPLETION_MODES,
+ withResolvers,
+} from './lock_manager.js'
+import { extractZodShape, normalizeEventResultType, toJsonSchema } from './types.js'
+import type { EventResultType } from './types.js'
+
+export const BaseEventSchema = z
+ .object({
+ event_id: z.string().uuid(),
+ event_created_at: z.string().datetime(),
+ event_created_ts: z.number().optional(),
+ event_type: z.string(),
+ event_version: z.string().default('0.0.1'),
+ event_timeout: z.number().positive().nullable(),
+ event_handler_timeout: z.number().positive().nullable().optional(),
+ event_handler_slow_timeout: z.number().positive().nullable().optional(),
+ event_parent_id: z.string().uuid().nullable().optional(),
+ event_path: z.array(z.string()).optional(),
+ event_result_type: z.unknown().optional(),
+ event_emitted_by_handler_id: z.string().uuid().nullable().optional(),
+ event_pending_bus_count: z.number().nonnegative().optional(),
+ event_status: z.enum(['pending', 'started', 'completed']).optional(),
+ event_started_at: z.string().datetime().nullable().optional(),
+ event_started_ts: z.number().nullable().optional(),
+ event_completed_at: z.string().datetime().nullable().optional(),
+ event_completed_ts: z.number().nullable().optional(),
+ event_results: z.array(z.unknown()).optional(),
+ event_concurrency: z.enum(EVENT_CONCURRENCY_MODES).nullable().optional(),
+ event_handler_concurrency: z.enum(EVENT_HANDLER_CONCURRENCY_MODES).nullable().optional(),
+ event_handler_completion: z.enum(EVENT_HANDLER_COMPLETION_MODES).nullable().optional(),
+ })
+ .loose()
+
+export type BaseEventData = z.infer<typeof BaseEventSchema>
+export type BaseEventJSON = BaseEventData & Record<string, unknown>
+type BaseEventFields = Pick<
+ BaseEventData,
+ | 'event_id'
+ | 'event_created_at'
+ | 'event_created_ts'
+ | 'event_type'
+ | 'event_version'
+ | 'event_timeout'
+ | 'event_handler_timeout'
+ | 'event_handler_slow_timeout'
+ | 'event_parent_id'
+ | 'event_path'
+ | 'event_result_type'
+ | 'event_emitted_by_handler_id'
+ | 'event_pending_bus_count'
+ | 'event_status'
+ | 'event_started_at'
+ | 'event_started_ts'
+ | 'event_completed_at'
+ | 'event_completed_ts'
+ | 'event_results'
+ | 'event_concurrency'
+ | 'event_handler_concurrency'
+ | 'event_handler_completion'
+>
+
+export type BaseEventInit<TFields extends Partial<BaseEventFields>> = TFields & Partial<BaseEventFields>
+
+type BaseEventSchemaShape = typeof BaseEventSchema.shape
+
+export type EventSchema = z.ZodObject
+type EventPayload = z.infer>
+
+type EventInput = z.input>
+export type EventInit = Omit, keyof BaseEventFields> & Partial
+
+type EventWithResultSchema<TResult> = BaseEvent & { __event_result_type__?: TResult }
+
+type ResultTypeFromEventResultTypeInput<TInput> = TInput extends z.ZodTypeAny
+ ? z.infer<TInput>
+ : TInput extends StringConstructor
+ ? string
+ : TInput extends NumberConstructor
+ ? number
+ : TInput extends BooleanConstructor
+ ? boolean
+ : TInput extends ArrayConstructor
+ ? unknown[]
+ : TInput extends ObjectConstructor
+ ? Record<string, unknown>
+ : unknown
+
+type ResultSchemaFromShape<TShape> = TShape extends { event_result_type: infer S } ? ResultTypeFromEventResultTypeInput<S> : unknown
+
+export type EventFactory = {
+ (data: EventInit): EventWithResultSchema & EventPayload
+ new (data: EventInit): EventWithResultSchema & EventPayload
+ schema: EventSchema
+ event_type?: string
+ event_version?: string
+ event_result_type?: z.ZodTypeAny
+ fromJSON?: (data: unknown) => EventWithResultSchema & EventPayload
+}
+
+type ZodShapeFrom> = {
+ [K in keyof TShape as K extends 'event_result_type' ? never : TShape[K] extends z.ZodTypeAny ? K : never]: Extract<
+ TShape[K],
+ z.ZodTypeAny
+ >
+}
+
+export class BaseEvent {
+ // event metadata fields
+ event_id!: string // unique uuidv7 identifier for the event
+ event_created_at!: string // ISO datetime string version of event_created_at
+ event_created_ts!: number // nanosecond monotonic version of event_created_at
+ event_type!: string // should match the class name of the event, e.g. BaseEvent.extend("MyEvent").event_type === "MyEvent"
+ event_version!: string // event schema/version tag managed by callers for migration-friendly payload handling
+ event_timeout!: number | null // maximum time in seconds that the event is allowed to run before it is aborted
+ event_handler_timeout?: number | null // optional per-event handler timeout override in seconds
+ event_handler_slow_timeout?: number | null // optional per-event slow handler warning threshold in seconds
+ event_parent_id!: string | null // id of the parent event that triggered this event, if this event was emitted during handling of another event, else null
+ event_path!: string[] // list of bus labels (name#id) that the event has been dispatched to, including the current bus
+ event_result_type?: z.ZodTypeAny // optional zod schema to enforce the shape of return values from handlers
+ event_results!: Map<string, EventResult<unknown>> // map of handler ids to EventResult objects for the event
+ event_emitted_by_handler_id!: string | null // if event was emitted inside a handler while it was running, this is set to the enclosing handler's handler id, else null
+ event_pending_bus_count!: number // number of buses that have accepted this event and not yet finished processing or removed it from their queues (for queue-jump processing)
+ event_status!: 'pending' | 'started' | 'completed' // processing status of the event as a whole, no separate 'error' state because events can not error, only individual handlers can
+ event_started_at?: string | null // ISO datetime string version of event_started_ts
+ event_started_ts?: number | null // nanosecond monotonic version of event_started_at
+ event_completed_at?: string | null // ISO datetime string version of event_completed_ts
+ event_completed_ts?: number | null // nanosecond monotonic version of event_completed_at
+ event_concurrency?: EventConcurrencyMode | null // concurrency mode for the event as a whole in relation to other events
+ event_handler_concurrency?: EventHandlerConcurrencyMode | null // concurrency mode for the handlers within the event
+ event_handler_completion?: EventHandlerCompletionMode | null // completion strategy: 'all' (default) waits for every handler, 'first' returns earliest non-undefined result and cancels the rest
+
+ static event_type?: string // class name of the event, e.g. BaseEvent.extend("MyEvent").event_type === "MyEvent"
+ static event_version = '0.0.1'
+ static schema = BaseEventSchema // zod schema for the event data fields, used to parse and validate event data when creating a new event
+
+ // internal runtime state
+ bus?: EventBus // shortcut to the bus that dispatched this event, for event.bus.dispatch(event) auto-child tracking via proxy wrapping
+ _event_original?: BaseEvent // underlying event object that was dispatched, if this is a bus-scoped proxy wrapping it
+ _event_dispatch_context?: unknown | null // captured AsyncLocalStorage context at dispatch site, used to restore that context when running handlers
+
+ _event_done_signal: Deferred | null
+ _event_handler_lock: AsyncLock | null
+
+ constructor(data: BaseEventInit> = {}) {
+ const ctor = this.constructor as typeof BaseEvent & {
+ event_version?: string
+ event_result_type?: z.ZodTypeAny
+ }
+ const event_type = data.event_type ?? ctor.event_type ?? ctor.name
+ const event_version = data.event_version ?? ctor.event_version ?? '0.0.1'
+ const raw_event_result_type = data.event_result_type ?? ctor.event_result_type
+ const event_result_type = normalizeEventResultType(raw_event_result_type)
+ const event_id = data.event_id ?? uuidv7()
+ const { isostring: default_event_created_at, ts: default_event_created_ts } = BaseEvent.nextTimestamp()
+ const event_created_at = data.event_created_at ?? default_event_created_at
+ const event_created_ts = data.event_created_ts === undefined ? default_event_created_ts : data.event_created_ts
+ const event_timeout = data.event_timeout ?? null
+
+ const base_data = {
+ ...data,
+ event_id,
+ event_created_at,
+ event_created_ts,
+ event_type,
+ event_version,
+ event_timeout,
+ event_result_type,
+ }
+
+ const schema = ctor.schema ?? BaseEventSchema
+ const parsed = schema.parse(base_data) as BaseEventData & Record
+
+ Object.assign(this, parsed)
+
+ const parsed_path = (parsed as { event_path?: string[] }).event_path
+ this.event_path = Array.isArray(parsed_path) ? [...parsed_path] : []
+
+ // load event results from potentially raw objects from JSON to proper EventResult objects
+ this.event_results = hydrateEventResults(this, (parsed as { event_results?: unknown }).event_results)
+ this.event_pending_bus_count =
+ typeof (parsed as { event_pending_bus_count?: unknown }).event_pending_bus_count === 'number'
+ ? Math.max(0, Number((parsed as { event_pending_bus_count?: number }).event_pending_bus_count))
+ : 0
+ const parsed_status = (parsed as { event_status?: unknown }).event_status
+ this.event_status =
+ parsed_status === 'pending' || parsed_status === 'started' || parsed_status === 'completed' ? parsed_status : 'pending'
+
+ this.event_started_at = parsed.event_started_at ?? null
+ this.event_started_ts = parsed.event_started_ts ?? null
+ this.event_completed_at = parsed.event_completed_at ?? null
+ this.event_completed_ts = parsed.event_completed_ts ?? null
+ this.event_parent_id =
+ typeof (parsed as { event_parent_id?: unknown }).event_parent_id === 'string'
+ ? (parsed as { event_parent_id: string }).event_parent_id
+ : null
+ this.event_emitted_by_handler_id =
+ typeof (parsed as { event_emitted_by_handler_id?: unknown }).event_emitted_by_handler_id === 'string'
+ ? (parsed as { event_emitted_by_handler_id: string }).event_emitted_by_handler_id
+ : null
+
+ this.event_result_type = event_result_type
+ this.event_created_ts = parsed.event_created_ts ?? event_created_ts
+
+ this._event_done_signal = null
+ this._event_handler_lock = null
+ this._event_dispatch_context = undefined
+ }
+
+ // "MyEvent#a48f"
+ toString(): string {
+ return `${this.event_type}#${this.event_id.slice(-4)}`
+ }
+
+ // get the next monotonic timestamp for global ordering of all operations
+ static nextTimestamp(): { date: Date; isostring: string; ts: number } {
+ const ts = performance.now()
+ const date = new Date(performance.timeOrigin + ts)
+ return { date, isostring: date.toISOString(), ts }
+ }
+
+ // main entry point for users to define their own event types
+ // BaseEvent.extend("MyEvent", { some_custom_field: z.string(), event_result_type: z.string(), event_timeout: 25, ... }) -> MyEvent
+ // Returns a callable factory (usable without `new`) that also mirrors the
+ // static class surface (schema, event_type, fromJSON, ...).
+ static extend(event_type: string, shape?: TShape): EventFactory>
+ static extend>(
+ event_type: string,
+ shape?: TShape
+ ): EventFactory, ResultSchemaFromShape>
+ static extend>(
+ event_type: string,
+ shape: TShape = {} as TShape
+ ): EventFactory, ResultSchemaFromShape> {
+ // split reserved config keys (event_result_type, event_version) out of the
+ // user-provided shape; the remaining entries are treated as zod field definitions
+ const raw_shape = shape as Record
+ const raw_event_result_type = raw_shape.event_result_type
+ const event_result_type = normalizeEventResultType(raw_event_result_type)
+ const event_version = typeof raw_shape.event_version === 'string' ? raw_shape.event_version : undefined
+
+ const zod_shape = extractZodShape(raw_shape)
+ const full_schema = BaseEventSchema.extend(zod_shape)
+
+ // create a new event class that extends BaseEvent and adds the custom fields
+ class ExtendedEvent extends BaseEvent {
+ static schema = full_schema as unknown as typeof BaseEvent.schema
+ static event_type = event_type
+ static event_version = event_version ?? BaseEvent.event_version
+ static event_result_type = event_result_type
+
+ constructor(data: EventInit>) {
+ super(data as BaseEventInit>)
+ }
+ }
+
+ type FactoryResult = EventWithResultSchema> & EventPayload>
+
+ // plain function so callers can write MyEvent({...}) without `new`
+ function EventFactory(data: EventInit>): FactoryResult {
+ return new ExtendedEvent(data) as FactoryResult
+ }
+
+ // mirror the static class surface onto the factory function
+ EventFactory.schema = full_schema as EventSchema>
+ EventFactory.event_type = event_type
+ EventFactory.event_version = event_version ?? BaseEvent.event_version
+ EventFactory.event_result_type = event_result_type
+ EventFactory.fromJSON = (data: unknown) => (ExtendedEvent.fromJSON as (data: unknown) => FactoryResult)(data)
+ // sharing the prototype makes `x instanceof EventFactory` match ExtendedEvent instances
+ EventFactory.prototype = ExtendedEvent.prototype
+ ;(EventFactory as unknown as { class: typeof ExtendedEvent }).class = ExtendedEvent
+
+ return EventFactory as unknown as EventFactory, ResultSchemaFromShape>
+ }
+
+ // Rehydrate an event from a plain JSON object (inverse of toJSON()).
+ // Non-object input is run through the zod schema so a descriptive parse error surfaces.
+ static fromJSON(this: T, data: unknown): InstanceType {
+ if (!data || typeof data !== 'object') {
+ const schema = this.schema ?? BaseEventSchema
+ const parsed = schema.parse(data)
+ return new this(parsed) as InstanceType
+ }
+ const record = { ...(data as Record) }
+ // serialized event_result_type may be in JSON-schema form; normalize it back
+ if (record.event_result_type !== undefined && record.event_result_type !== null) {
+ record.event_result_type = normalizeEventResultType(record.event_result_type)
+ }
+ return new this(record as BaseEventInit>) as InstanceType
+ }
+
+ // Serialize a collection of events, unwrapping bus-scoped proxies via
+ // _event_original so each event is serialized from its canonical copy.
+ static toJSONArray(events: Iterable): BaseEventJSON[] {
+ return Array.from(events, (event) => {
+ const original = event._event_original ?? event
+ return original.toJSON()
+ })
+ }
+
+ // Rehydrate an array of serialized events; any non-array input yields an empty list.
+ static fromJSONArray(data: unknown): BaseEvent[] {
+ if (!Array.isArray(data)) return []
+ const events: BaseEvent[] = []
+ for (const item of data) {
+ events.push(BaseEvent.fromJSON(item))
+ }
+ return events
+ }
+
+ // Serialize to a plain JSON object: copy own enumerable fields (skipping
+ // _-prefixed internals, `bus`, `event_results`, undefineds and functions),
+ // then overwrite the known fields explicitly with canonical values.
+ toJSON(): BaseEventJSON {
+ const record: Record = {}
+ for (const [key, value] of Object.entries(this as unknown as Record)) {
+ if (key.startsWith('_') || key === 'bus' || key === 'event_results') continue
+ if (value === undefined || typeof value === 'function') continue
+ record[key] = value
+ }
+ const event_results = Array.from(this.event_results.values()).map((result) => result.toJSON())
+
+ return {
+ ...record,
+ event_id: this.event_id,
+ event_type: this.event_type,
+ event_version: this.event_version,
+ // the zod result schema is not directly JSON-serializable; emit JSON-schema form
+ event_result_type: this.event_result_type ? toJsonSchema(this.event_result_type) : this.event_result_type,
+
+ // static configuration options
+ event_timeout: this.event_timeout,
+ event_concurrency: this.event_concurrency,
+ event_handler_concurrency: this.event_handler_concurrency,
+ event_handler_completion: this.event_handler_completion,
+ event_handler_slow_timeout: this.event_handler_slow_timeout,
+ event_handler_timeout: this.event_handler_timeout,
+
+ // mutable parent/child/bus tracking runtime state
+ event_parent_id: this.event_parent_id,
+ event_path: this.event_path,
+ event_emitted_by_handler_id: this.event_emitted_by_handler_id,
+ event_pending_bus_count: this.event_pending_bus_count,
+
+ // mutable runtime status and timestamps
+ event_status: this.event_status,
+ event_created_at: this.event_created_at,
+ event_created_ts: this.event_created_ts,
+ event_started_at: this.event_started_at,
+ event_started_ts: this.event_started_ts,
+ event_completed_at: this.event_completed_at,
+ event_completed_ts: this.event_completed_ts,
+
+ // mutable result state (omitted entirely when there are no results)
+ ...(event_results.length > 0 ? { event_results } : {}),
+ }
+ }
+
+ // Arm a one-shot timer that logs a console warning if this event is still not
+ // completed after the configured slow-timeout (seconds). Returns null when
+ // neither the event nor its bus configures a slow-timeout.
+ createSlowEventWarningTimer(): ReturnType | null {
+ const event_slow_timeout = (this as { event_slow_timeout?: number | null }).event_slow_timeout ?? this.bus?.event_slow_timeout ?? null
+ const event_warn_ms = event_slow_timeout === null ? null : event_slow_timeout * 1000
+ if (event_warn_ms === null) {
+ return null
+ }
+ const name = this.bus?.name ?? 'EventBus'
+ return setTimeout(() => {
+ if (this.event_status === 'completed') {
+ return
+ }
+ const running_handler_count = [...this.event_results.values()].filter((result) => result.status === 'started').length
+ // elapsed time measured from start (falling back to creation time)
+ const started_ts = this.event_started_ts ?? this.event_created_ts ?? performance.now()
+ const elapsed_ms = Math.max(0, performance.now() - started_ts)
+ const elapsed_seconds = (elapsed_ms / 1000).toFixed(2)
+ console.warn(
+ `[bubus] Slow event processing: ${name}.on(${this.event_type}#${this.event_id.slice(-4)}, ${running_handler_count} handlers) still running after ${elapsed_seconds}s`
+ )
+ }, event_warn_ms)
+ }
+
+ // Build (or reuse) one EventResult per handler registered on `bus` for this
+ // event, stored on the canonical (unproxied) event keyed by handler id.
+ // Existing results are re-pointed at the current bus-scoped proxy if stale.
+ createPendingHandlerResults(bus: EventBus): Array<{
+ handler: EventHandler
+ result: EventResult
+ }> {
+ const original_event = this._event_original ?? this
+ const scoped_event = bus.getEventProxyScopedToThisBus(original_event)
+ const handlers = bus.getHandlersForEvent(original_event)
+ return handlers.map((entry) => {
+ const handler_id = entry.id
+ const existing = original_event.event_results.get(handler_id)
+ const result = existing ?? new EventResult({ event: scoped_event, handler: entry })
+ if (!existing) {
+ original_event.event_results.set(handler_id, result)
+ } else if (existing.event !== scoped_event) {
+ existing.event = scoped_event
+ }
+ return { handler: entry, result }
+ })
+ }
+
+ // Run all pending handler results for the current bus context.
+ // In 'first' completion mode, remaining handlers are cancelled as soon as one
+ // handler completes with a non-undefined, non-event result; otherwise every
+ // handler is awaited.
+ async processEvent(
+ pending_entries?: Array<{
+ handler: EventHandler
+ result: EventResult
+ }>
+ ): Promise {
+ const original = this._event_original ?? this
+ const bus_id = this.bus?.id
+ // no explicit list given: use all results scoped to this bus
+ const pending_results =
+ pending_entries?.map((entry) => entry.result) ??
+ Array.from(original.event_results.values()).filter((result) => !bus_id || result.eventbus_id === bus_id)
+ if (pending_results.length === 0) {
+ return
+ }
+ if (original.event_handler_completion === 'first') {
+ const is_serial_handler_mode = original.getHandlerLock() !== null
+ if (is_serial_handler_mode) {
+ // serial: run handlers one at a time, stop at the first usable result
+ for (const entry of pending_results) {
+ await entry.runHandler()
+ if (entry.status === 'completed' && entry.result !== undefined && !(entry.result instanceof BaseEvent)) {
+ original.cancelEventHandlersForFirstMode(entry)
+ break
+ }
+ }
+ } else {
+ // parallel: start everything, cancel the losers once a winner completes
+ const handler_promises = pending_results.map((entry) => entry.runHandler())
+ let first_found = false
+ const monitored = pending_results.map((entry, i) =>
+ handler_promises[i].then(() => {
+ if (!first_found && entry.status === 'completed' && entry.result !== undefined && !(entry.result instanceof BaseEvent)) {
+ first_found = true
+ original.cancelEventHandlersForFirstMode(entry)
+ }
+ })
+ )
+ await Promise.all(monitored)
+ }
+ } else {
+ // default mode: wait for every handler to finish
+ const handler_promises = pending_results.map((entry) => entry.runHandler())
+ await Promise.all(handler_promises)
+ }
+ }
+
+ // Resolve the handler-concurrency mode (event override -> explicit default ->
+ // bus default -> 'serial') and return the per-event serial lock, or null when
+ // handlers are allowed to run in parallel.
+ getHandlerLock(default_concurrency?: EventHandlerConcurrencyMode): AsyncLock | null {
+ const original = this._event_original ?? this
+ const mode =
+ original.event_handler_concurrency ?? default_concurrency ?? original.bus?.event_handler_concurrency_default ?? 'serial'
+ if (mode === 'parallel') return null
+ original._event_handler_lock ??= new AsyncLock(1)
+ return original._event_handler_lock
+ }
+
+ // Look up the parent event object via event_parent_id (searches across all busses).
+ get event_parent(): BaseEvent | undefined {
+ const original = this._event_original ?? this
+ if (!original.event_parent_id) return undefined
+ const found = original.bus?.findEventById(original.event_parent_id)
+ return found ?? undefined
+ }
+
+ // All direct child events of this event in first-seen order, deduplicated by
+ // event_id, gathered from every handler result.
+ get event_children(): BaseEvent[] {
+ const unique_children = new Map()
+ for (const result of this.event_results.values()) {
+ for (const child of result.event_children) {
+ if (!unique_children.has(child.event_id)) {
+ unique_children.set(child.event_id, child)
+ }
+ }
+ }
+ return [...unique_children.values()]
+ }
+
+ // Every transitive child (children, grandchildren, ...) found via iterative
+ // depth-first traversal; cycles and the root event itself are skipped.
+ get event_descendants(): BaseEvent[] {
+ const root_id = this.event_id
+ const visited = new Set()
+ const collected: BaseEvent[] = []
+ const stack = [...this.event_children]
+
+ while (stack.length > 0) {
+ const current = stack.pop()
+ if (!current) continue
+ const current_id = current.event_id
+ if (current_id === root_id || visited.has(current_id)) continue
+ visited.add(current_id)
+ collected.push(current)
+ const grandchildren = current.event_children
+ if (grandchildren.length > 0) {
+ stack.push(...grandchildren)
+ }
+ }
+
+ return collected
+ }
+
+ // force-abort processing of all pending descendants of an event regardless of whether they have already started
+ cancelPendingDescendants(reason: unknown): void {
+ const original = this._event_original ?? this
+ // unwrap nested cancellation/abort errors so the root cause propagates;
+ // non-Error reasons are wrapped in an Error
+ const cancellation_cause =
+ reason instanceof EventHandlerTimeoutError
+ ? reason
+ : reason instanceof EventHandlerCancelledError || reason instanceof EventHandlerAbortedError
+ ? reason.cause instanceof Error
+ ? reason.cause
+ : reason
+ : reason instanceof Error
+ ? reason
+ : new Error(String(reason))
+ const visited = new Set()
+ const cancelChildEvent = (child: BaseEvent): void => {
+ const original_child = child._event_original ?? child
+ if (visited.has(original_child.event_id)) {
+ return
+ }
+ visited.add(original_child.event_id)
+
+ // Depth-first: cancel grandchildren before parent so
+ // eventAreAllChildrenComplete() returns true when we get back up.
+ for (const grandchild of original_child.event_children) {
+ cancelChildEvent(grandchild)
+ }
+
+ original_child.markCancelled(cancellation_cause)
+
+ // Force-complete the child event. In JS we can't stop running async
+ // handlers, but markCompleted() resolves the done() promise so callers
+ // aren't blocked waiting for background work to finish. The background
+ // handler's eventual markCompleted/markError is a no-op (terminal guard).
+ if (original_child.event_status !== 'completed') {
+ original_child.markCompleted()
+ }
+ }
+
+ for (const child of original.event_children) {
+ cancelChildEvent(child)
+ }
+ }
+
+ // Cancel all handler results for an event except the winner, used by first() mode.
+ // Cancels pending handlers immediately, aborts started handlers via signalAbort(),
+ // and cancels any child events emitted by the losing handlers.
+ cancelEventHandlersForFirstMode(winner: EventResult): void {
+ const cause = new Error('first() resolved: another handler returned a result first')
+ const bus_id = winner.eventbus_id
+
+ // only losers on the same bus as the winning handler are touched
+ for (const result of this.event_results.values()) {
+ if (result === winner) continue
+ if (result.eventbus_id !== bus_id) continue
+
+ if (result.status === 'pending') {
+ result.markError(
+ new EventHandlerCancelledError(`Cancelled: first() resolved`, {
+ event_result: result,
+ cause,
+ })
+ )
+ } else if (result.status === 'started') {
+ // Cancel child events emitted by this handler before aborting it
+ for (const child of result.event_children) {
+ const original_child = child._event_original ?? child
+ original_child.cancelPendingDescendants(cause)
+ original_child.markCancelled(cause)
+ }
+
+ // Abort the handler itself
+ result._lock?.exitHandlerRun()
+ const aborted_error = new EventHandlerAbortedError(`Aborted: first() resolved`, {
+ event_result: result,
+ cause,
+ })
+ result.markError(aborted_error)
+ result.signalAbort(aborted_error)
+ }
+ }
+ }
+
+ // force-abort processing of this event regardless of whether it is pending or has already started
+ // NOTE(review): relies on this.bus! — assumes a bus is still attached when
+ // cancellation happens; confirm callers guarantee this.
+ markCancelled(cause: Error): void {
+ const original = this._event_original ?? this
+ const registry = this.bus!._all_instances
+ // only visit buses this event has actually been dispatched through
+ const path = Array.isArray(original.event_path) ? original.event_path : []
+ const buses_to_cancel = new Set(path)
+ for (const bus of registry as Iterable<{
+ name?: string
+ label?: string
+ pending_event_queue?: BaseEvent[]
+ in_flight_event_ids?: Set
+ createPendingHandlerResults?: (event: BaseEvent) => Array<{ result: EventResult }>
+ getHandlersForEvent?: (event: BaseEvent) => unknown
+ }>) {
+ if (!bus?.label || !buses_to_cancel.has(bus.label)) {
+ continue
+ }
+
+ // fail pending handlers / abort started handlers for this event on this bus
+ const handler_entries = original.createPendingHandlerResults(bus as unknown as EventBus)
+ let updated = false
+ for (const entry of handler_entries) {
+ if (entry.result.status === 'pending') {
+ const cancelled_error = new EventHandlerCancelledError(`Cancelled pending handler due to parent error: ${cause.message}`, {
+ event_result: entry.result,
+ cause,
+ })
+ entry.result.markError(cancelled_error)
+ updated = true
+ } else if (entry.result.status === 'started') {
+ entry.result._lock?.exitHandlerRun()
+ const aborted_error = new EventHandlerAbortedError(`Aborted running handler due to parent error: ${cause.message}`, {
+ event_result: entry.result,
+ cause,
+ })
+ entry.result.markError(aborted_error)
+ entry.result.signalAbort(aborted_error)
+ updated = true
+ }
+ }
+
+ // drop this event from the bus's pending queue, if still queued
+ let removed = 0
+ if (Array.isArray(bus.pending_event_queue) && bus.pending_event_queue.length > 0) {
+ const before_len = bus.pending_event_queue.length
+ bus.pending_event_queue = bus.pending_event_queue.filter(
+ (queued) => (queued._event_original ?? queued).event_id !== original.event_id
+ )
+ removed = before_len - bus.pending_event_queue.length
+ }
+
+ // a dequeued event that never started no longer counts toward pending buses
+ if (removed > 0 && !bus.in_flight_event_ids?.has(original.event_id)) {
+ original.event_pending_bus_count = Math.max(0, original.event_pending_bus_count - 1)
+ }
+
+ if (updated || removed > 0) {
+ original.markCompleted(false)
+ }
+ }
+
+ if (original.event_status !== 'completed') {
+ original.markCompleted()
+ }
+ }
+
+ // Walk up the parent chain, letting each ancestor attempt a non-forced
+ // completion (markCompleted(false, false) only succeeds once all of that
+ // ancestor's children/buses are done). Stops at the first ancestor that stays
+ // incomplete; the visited set guards against parent-id cycles.
+ notifyEventParentsOfCompletion(): void {
+ const original = this._event_original ?? this
+ const registry = this.bus!._all_instances as { findEventById: (id: string) => BaseEvent | null }
+ const visited = new Set()
+ let parent_id = original.event_parent_id
+ while (parent_id && !visited.has(parent_id)) {
+ visited.add(parent_id)
+ const parent = registry.findEventById(parent_id)
+ if (!parent) {
+ break
+ }
+ parent.markCompleted(false, false)
+ if (parent.event_status !== 'completed') {
+ break
+ }
+ parent_id = parent.event_parent_id
+ }
+ }
+
+ // awaitable that triggers immediate (queue-jump) processing of the event on all buses where it is queued
+ // use event.waitForCompletion() or event.finished() to wait for the event to be processed in normal queue order
+ // Rejects immediately when the event was never dispatched (no bus attached).
+ done(): Promise {
+ if (!this.bus) {
+ return Promise.reject(new Error('event has no bus attached'))
+ }
+ if (this.event_status === 'completed') {
+ return Promise.resolve(this)
+ }
+ // Always delegate to processEventImmediately — it walks up the parent event tree
+ // to determine whether we're inside a handler (works cross-bus). If no
+ // ancestor handler is in-flight, it falls back to waitForCompletion().
+ const runner_bus = this.bus as {
+ processEventImmediately: (event: BaseEvent) => Promise
+ }
+ return runner_bus.processEventImmediately(this) as Promise
+ }
+
+ // clearer alias for done() to indicate that the event will be processed immediately
+ // await bus.dispatch(event).immediate() is less ambiguous than await event.done()
+ // (identical semantics: queue-jump processing, resolves with the completed event)
+ immediate(): Promise {
+ return this.done()
+ }
+
+ // returns the first non-undefined handler result value, cancelling remaining handlers
+ // when any handler completes. Works with all event_handler_concurrency modes:
+ // parallel: races all handlers, returns first non-undefined, aborts the rest
+ // serial: runs handlers sequentially, returns first non-undefined, skips remaining
+ first(): Promise | undefined> {
+ if (!this.bus) {
+ return Promise.reject(new Error('event has no bus attached'))
+ }
+ const original = this._event_original ?? this
+ // switch completion mode before processing so processEvent() short-circuits
+ original.event_handler_completion = 'first'
+ return this.done().then((completed_event) => {
+ const orig = completed_event._event_original ?? completed_event
+ // earliest completed, non-undefined, non-event result wins
+ return Array.from(orig.event_results.values())
+ .filter((result) => result.status === 'completed' && result.result !== undefined && !(result.result instanceof BaseEvent))
+ .sort((a, b) => (a.completed_ts ?? 0) - (b.completed_ts ?? 0))
+ .map((result) => result.result as EventResultType)
+ .at(0)
+ })
+ }
+
+ // awaitable that waits for the event to be processed in normal queue order by the runloop
+ waitForCompletion(): Promise {
+ if (this.event_status === 'completed') {
+ return Promise.resolve(this)
+ }
+ // lazily creates _event_done_signal, so the non-null assertion below is safe
+ this._notifyDoneListeners()
+ return this._event_done_signal!.promise
+ }
+
+ // Convenience alias for waitForCompletion(): resolves once the event has been
+ // fully processed in normal queue order (no queue-jumping).
+ finished(): Promise {
+ return this.waitForCompletion()
+ }
+
+ // Reset the event back to a dispatchable 'pending' state: wipe runtime
+ // timestamps, handler results, and all bus bookkeeping. Returns this for chaining.
+ markPending(): this {
+ const original = this._event_original ?? this
+ // status + timestamps
+ original.event_status = 'pending'
+ original.event_started_at = null
+ original.event_completed_at = null
+ original.event_started_ts = null
+ original.event_completed_ts = null
+ // handler results + bus bookkeeping
+ original.event_results.clear()
+ original.event_pending_bus_count = 0
+ original.bus = undefined
+ // internal signals and locks
+ original._event_dispatch_context = undefined
+ original._event_done_signal = null
+ original._event_handler_lock = null
+ return this
+ }
+
+ // Clone this event via a JSON round-trip, give the copy a brand-new event_id,
+ // and return it reset to 'pending' (the original event is left untouched).
+ reset(): this {
+ const original = this._event_original ?? this
+ const event_class = original.constructor as typeof BaseEvent
+ const clone = event_class.fromJSON(original.toJSON()) as this
+ clone.event_id = uuidv7()
+ return clone.markPending()
+ }
+
+ // Transition 'pending' -> 'started' (no-op from any other status), stamp the
+ // start timestamps, and notify the owning bus of the change on a microtask.
+ markStarted(): void {
+ const original = this._event_original ?? this
+ if (original.event_status !== 'pending') {
+ return
+ }
+ original.event_status = 'started'
+ const { isostring: event_started_at, ts: event_started_ts } = BaseEvent.nextTimestamp()
+ original.event_started_at = event_started_at
+ original.event_started_ts = event_started_ts
+ if (original.bus) {
+ const event_for_bus = original.bus.getEventProxyScopedToThisBus(original)
+ original.bus.scheduleMicrotask(() => {
+ void original.bus!._on_event_change(event_for_bus, 'started')
+ })
+ }
+ }
+
+ // Finalize the event: set status/timestamps, notify the bus, resolve the
+ // done() promise, and optionally cascade completion up to parent events.
+ // force=false turns this into a conditional attempt that bails while any bus
+ // still has the event queued or any child event is incomplete.
+ markCompleted(force: boolean = true, notify_parents: boolean = true): void {
+ const original = this._event_original ?? this
+ if (original.event_status === 'completed') {
+ return
+ }
+ if (!force) {
+ if (original.event_pending_bus_count > 0) {
+ return
+ }
+ if (!original.eventAreAllChildrenComplete()) {
+ return
+ }
+ }
+ original.event_status = 'completed'
+ const { isostring: event_completed_at, ts: event_completed_ts } = BaseEvent.nextTimestamp()
+ original.event_completed_at = event_completed_at
+ original.event_completed_ts = event_completed_ts
+ if (original.bus) {
+ const event_for_bus = original.bus.getEventProxyScopedToThisBus(original)
+ original.bus.scheduleMicrotask(() => {
+ void original.bus!._on_event_change(event_for_bus, 'completed')
+ })
+ }
+ original._event_dispatch_context = null
+ // _notifyDoneListeners() lazily creates the signal, so the `!` below is safe
+ original._notifyDoneListeners()
+ original._event_done_signal!.resolve(original)
+ original._event_done_signal = null
+ original.dropFromZeroHistoryBuses()
+ if (notify_parents && original.bus) {
+ original.notifyEventParentsOfCompletion()
+ }
+ }
+
+ // Evict this event from the history of every bus configured with
+ // max_history_size === 0 (no-history buses retain nothing after completion).
+ private dropFromZeroHistoryBuses(): void {
+ if (!this.bus) return
+ const original = this._event_original ?? this
+ for (const bus of this.bus._all_instances) {
+ if (bus.max_history_size === 0) {
+ bus.event_history.delete(original.event_id)
+ }
+ }
+ }
+
+ get event_errors(): unknown[] {
+ // const errors: unknown[] = []
+ // for (const result of this.event_results.values()) {
+ // if (result.error !== undefined) {
+ // errors.push(result.error)
+ // }
+ // }
+ // return errors
+ return (
+ Array.from(this.event_results.values())
+ // filter for events that have completed + have non-undefined error values
+ .filter((event_result) => event_result.error !== undefined && event_result.completed_ts !== null)
+ // sort by completion time
+ .sort((event_result_a, event_result_b) => (event_result_a.completed_ts ?? 0) - (event_result_b.completed_ts ?? 0))
+ // assemble array of flat error values
+ .map((event_result) => event_result.error)
+ )
+ }
+
+ // all non-undefined handler result values in completion order
+ get all_results(): EventResultType[] {
+ return (
+ Array.from(this.event_results.values())
+ // only events that have completed + have non-undefined result values
+ .filter((event_result) => event_result.completed_ts !== null && event_result.result !== undefined)
+ // sort by completion time
+ .sort((event_result_a, event_result_b) => (event_result_a.completed_ts ?? 0) - (event_result_b.completed_ts ?? 0))
+ // assemble array of flat parsed handler return values
+ .map((event_result) => event_result.result as EventResultType)
+ )
+ }
+
+ // The earliest-completing non-undefined handler result — the "winning" value
+ // after first() or done(); undefined when no handler produced a result.
+ get event_result(): EventResultType | undefined {
+ const [first_result] = this.all_results
+ return first_result
+ }
+
+ // The latest-completing non-undefined handler result, or undefined when no
+ // handler produced a result.
+ get last_result(): EventResultType | undefined {
+ const results = this.all_results
+ return results.length > 0 ? results[results.length - 1] : undefined
+ }
+
+ // True once every transitive descendant event has reached 'completed'.
+ eventAreAllChildrenComplete(): boolean {
+ return this.event_descendants.every((descendant) => descendant.event_status === 'completed')
+ }
+
+ // Lazily create the done-signal promise that waitForCompletion() hands out;
+ // a no-op when one already exists.
+ _notifyDoneListeners(): void {
+ this._event_done_signal ??= withResolvers()
+ }
+
+ // Break internal reference chains so a completed event can be GC'd once it is
+ // evicted from event_history. Called by EventBus.trimHistory().
+ _gc(): void {
+ this._event_done_signal = null
+ this._event_dispatch_context = null
+ this._event_handler_lock = null
+ this.bus = undefined
+ for (const result of this.event_results.values()) {
+ result.event_children = []
+ }
+ this.event_results.clear()
+ }
+}
+
+// Rebuild an event's results map from serialized results; entries are keyed by
+// handler_id when present, falling back to the result's own id. Non-array
+// input yields an empty map.
+const hydrateEventResults = (event: TEvent, raw_event_results: unknown): Map> => {
+ const event_results = new Map>()
+ if (!Array.isArray(raw_event_results)) {
+ return event_results
+ }
+ for (const item of raw_event_results) {
+ const result = EventResult.fromJSON(event, item)
+ const map_key = typeof result.handler_id === 'string' && result.handler_id.length > 0 ? result.handler_id : result.id
+ event_results.set(map_key, result)
+ }
+ return event_results
+}
diff --git a/bubus-ts/src/bridge_jsonl.ts b/bubus-ts/src/bridge_jsonl.ts
new file mode 100644
index 0000000..42daa47
--- /dev/null
+++ b/bubus-ts/src/bridge_jsonl.ts
@@ -0,0 +1,174 @@
+import { BaseEvent } from './base_event.js'
+import { EventBus } from './event_bus.js'
+import type { EventClass, EventHandlerFunction, EventPattern, UntypedEventHandlerFunction } from './types.js'
+
+// True when running under Node.js (detected via process.versions.node).
+const isNodeRuntime = (): boolean => {
+ const node_process = (globalThis as { process?: { versions?: { node?: string } } }).process
+ const node_version = node_process?.versions?.node
+ return typeof node_version === 'string'
+}
+
+// Indirect dynamic import via the Function constructor so bundlers targeting
+// non-Node platforms do not statically resolve/inline the builtin module.
+const importNodeModule = async (specifier: string): Promise => {
+ const dynamic_import = Function('module_name', 'return import(module_name)') as (module_name: string) => Promise
+ return dynamic_import(specifier) as Promise
+}
+
+// Up-to-8-char base36 suffix used to build unique default bridge names.
+const randomSuffix = (): string => {
+ return Math.random().toString(36).slice(2, 10)
+}
+
+// Bridges a local EventBus to an append-only JSONL file: outbound events are
+// appended as one JSON object per line; a background poll loop tails the file
+// and re-dispatches newly appended lines onto a private inbound bus.
+export class JSONLEventBridge {
+ readonly path: string
+ // poll interval in SECONDS (converted to ms when sleeping)
+ readonly poll_interval: number
+ readonly name: string
+
+ private readonly inbound_bus: EventBus
+ private running: boolean
+ // byte position up to which the file has already been consumed
+ private byte_offset: number
+ // trailing partial line carried over between polls
+ private pending_line: string
+ private listener_task: Promise | null
+
+ constructor(path: string, poll_interval: number = 0.25, name?: string) {
+ this.path = path
+ this.poll_interval = poll_interval
+ this.name = name ?? `JSONLEventBridge_${randomSuffix()}`
+ this.inbound_bus = new EventBus(this.name, { max_history_size: 0 })
+ this.running = false
+ this.byte_offset = 0
+ this.pending_line = ''
+ this.listener_task = null
+
+ // bind public methods so they can be passed around as bare callbacks
+ this.dispatch = this.dispatch.bind(this)
+ this.emit = this.emit.bind(this)
+ this.on = this.on.bind(this)
+ }
+
+ // Subscribe to inbound events (by class, type name, or '*'); lazily starts
+ // the background tail loop on first use.
+ on(event_pattern: EventClass, handler: EventHandlerFunction): void
+ on(event_pattern: string | '*', handler: UntypedEventHandlerFunction): void
+ on(event_pattern: EventPattern | '*', handler: EventHandlerFunction | UntypedEventHandlerFunction): void {
+ this.ensureStarted()
+ if (typeof event_pattern === 'string') {
+ this.inbound_bus.on(event_pattern, handler as UntypedEventHandlerFunction)
+ return
+ }
+ this.inbound_bus.on(event_pattern as EventClass, handler as EventHandlerFunction)
+ }
+
+ // Append the serialized event as a single JSONL line (creating parent dirs first).
+ async dispatch(event: T): Promise {
+ this.ensureStarted()
+ const fs = await this.loadFs()
+ await fs.promises.mkdir(this.dirname(this.path), { recursive: true })
+ const payload = JSON.stringify(event.toJSON()) + '\n'
+ await fs.promises.appendFile(this.path, payload, 'utf8')
+ }
+
+ // Alias for dispatch().
+ async emit(event: T): Promise {
+ return this.dispatch(event)
+ }
+
+ // Create/touch the file, seek to its current end (so only NEW lines are
+ // consumed), then launch the background poll loop.
+ async start(): Promise {
+ if (this.running) return
+ const fs = await this.loadFs()
+ await fs.promises.mkdir(this.dirname(this.path), { recursive: true })
+ await fs.promises.appendFile(this.path, '', 'utf8')
+ const stats = await fs.promises.stat(this.path)
+ this.byte_offset = Number(stats.size ?? 0)
+ this.pending_line = ''
+ this.running = true
+ this.listener_task = this.listenLoop()
+ }
+
+ // Stop polling (waits for the in-flight loop iteration) and destroy the bus.
+ async close(): Promise {
+ this.running = false
+ await Promise.allSettled(this.listener_task ? [this.listener_task] : [])
+ this.listener_task = null
+ this.inbound_bus.destroy()
+ }
+
+ // Fire-and-forget start; startup errors are logged, not thrown.
+ // NOTE(review): listener_task is only assigned at the END of start(), so two
+ // calls in quick succession can both invoke start() — confirm a double start
+ // is harmless here (it re-stats the file and resets byte_offset).
+ private ensureStarted(): void {
+ if (this.running || this.listener_task) return
+ void this.start().catch((error: unknown) => {
+ console.error('[bubus] JSONLEventBridge failed to start', error)
+ })
+ }
+
+ // Poll/sleep loop; runs until close() flips `running` to false.
+ private async listenLoop(): Promise {
+ while (this.running) {
+ try {
+ await this.pollNewLines()
+ } catch {
+ // Keep polling on transient errors.
+ }
+ await new Promise((resolve) => setTimeout(resolve, Math.max(1, this.poll_interval * 1000)))
+ }
+ }
+
+ // Read bytes appended since the last poll, split them into complete lines
+ // (keeping a trailing partial line for next time), and dispatch each JSON line.
+ private async pollNewLines(): Promise {
+ const previous_offset = this.byte_offset
+ const { chunk, next_offset } = await this.readAppended(previous_offset)
+ this.byte_offset = next_offset
+ // offset moved backwards => file was truncated/rotated: drop the stale partial line
+ if (next_offset < previous_offset) {
+ this.pending_line = ''
+ }
+ if (!chunk) return
+
+ const new_lines = (this.pending_line + chunk).split('\n')
+ this.pending_line = new_lines.pop() ?? ''
+
+ for (const line of new_lines) {
+ const trimmed = line.trim()
+ if (!trimmed) continue
+ try {
+ const payload = JSON.parse(trimmed)
+ await this.dispatchInboundPayload(payload)
+ } catch {
+ // Ignore malformed line.
+ }
+ }
+ }
+
+ // Rehydrate the serialized event, reset it to a fresh pending copy, and
+ // dispatch it on the local inbound bus.
+ private async dispatchInboundPayload(payload: unknown): Promise {
+ const event = BaseEvent.fromJSON(payload).reset()
+ this.inbound_bus.dispatch(event)
+ }
+
+ // Read the byte range [offset, EOF); a file smaller than `offset` (truncated)
+ // restarts the read from 0. Returns the decoded chunk and the resume offset.
+ private async readAppended(offset: number): Promise<{ chunk: string; next_offset: number }> {
+ const fs = await this.loadFs()
+ let size = 0
+ try {
+ const stats = await fs.promises.stat(this.path)
+ size = Number(stats.size ?? 0)
+ } catch (error: unknown) {
+ const code = (error as { code?: string }).code
+ if (code === 'ENOENT') {
+ return { chunk: '', next_offset: 0 }
+ }
+ throw error
+ }
+
+ const start_offset = size < offset ? 0 : offset
+ if (size === start_offset) {
+ return { chunk: '', next_offset: size }
+ }
+
+ const handle = await fs.promises.open(this.path, 'r')
+ try {
+ const byte_count = size - start_offset
+ const bytes = new Uint8Array(byte_count)
+ const { bytesRead } = await handle.read(bytes, 0, byte_count, start_offset)
+ const chunk = new TextDecoder().decode(bytes.subarray(0, Number(bytesRead ?? 0)))
+ return { chunk, next_offset: start_offset + Number(bytesRead ?? 0) }
+ } finally {
+ await handle.close()
+ }
+ }
+
+ // Minimal POSIX-style dirname ('/'-separated paths only).
+ private dirname(path: string): string {
+ const idx = path.lastIndexOf('/')
+ return idx >= 0 ? path.slice(0, idx) || '.' : '.'
+ }
+
+ // Lazy-load node:fs; throws outside Node.js runtimes.
+ private async loadFs(): Promise {
+ if (!isNodeRuntime()) {
+ throw new Error('JSONLEventBridge is only supported in Node.js runtimes')
+ }
+ return importNodeModule('node:fs')
+ }
+}
diff --git a/bubus-ts/src/bridge_nats.ts b/bubus-ts/src/bridge_nats.ts
new file mode 100644
index 0000000..7423190
--- /dev/null
+++ b/bubus-ts/src/bridge_nats.ts
@@ -0,0 +1,104 @@
+import { BaseEvent } from './base_event.js'
+import { EventBus } from './event_bus.js'
+import { assertOptionalDependencyAvailable, importOptionalDependency, isNodeRuntime } from './optional_deps.js'
+import type { EventClass, EventHandlerFunction, EventPattern, UntypedEventHandlerFunction } from './types.js'
+
+// Up-to-8-char base36 suffix used to build unique default bridge names.
+const randomSuffix = (): string => {
+ return Math.random().toString(36).slice(2, 10)
+}
+
+// Bridges a local EventBus to a NATS subject: outbound events are published as
+// JSON; inbound messages are rehydrated and re-dispatched on a private bus.
+export class NATSEventBridge {
+ readonly server: string
+ readonly subject: string
+ readonly name: string
+
+ private readonly inbound_bus: EventBus
+ private running: boolean
+ private nc: any | null
+ private sub_task: Promise | null
+ // Memoized start() promise: guarantees connect/subscribe runs at most once
+ // even when on()/dispatch() race before startup has finished. Previously two
+ // racing calls could each open a NATS connection, leaking one and delivering
+ // duplicate inbound events.
+ private start_promise: Promise<void> | null = null
+
+ constructor(server: string, subject: string, name?: string) {
+ assertOptionalDependencyAvailable('NATSEventBridge', 'nats')
+
+ this.server = server
+ this.subject = subject
+ this.name = name ?? `NATSEventBridge_${randomSuffix()}`
+ this.inbound_bus = new EventBus(this.name, { max_history_size: 0 })
+ this.running = false
+ this.nc = null
+ this.sub_task = null
+
+ // bind public methods so they can be passed around as bare callbacks
+ this.dispatch = this.dispatch.bind(this)
+ this.emit = this.emit.bind(this)
+ this.on = this.on.bind(this)
+ }
+
+ // Subscribe to inbound events (by class, type name, or '*'); lazily starts
+ // the NATS connection on first use.
+ on(event_pattern: EventClass, handler: EventHandlerFunction): void
+ on(event_pattern: string | '*', handler: UntypedEventHandlerFunction): void
+ on(event_pattern: EventPattern | '*', handler: EventHandlerFunction | UntypedEventHandlerFunction): void {
+ this.ensureStarted()
+ if (typeof event_pattern === 'string') {
+ this.inbound_bus.on(event_pattern, handler as UntypedEventHandlerFunction)
+ return
+ }
+ this.inbound_bus.on(event_pattern as EventClass, handler as EventHandlerFunction)
+ }
+
+ // Publish the serialized event to the configured subject.
+ async dispatch(event: T): Promise {
+ this.ensureStarted()
+ // start() is memoized, so this await joins any in-flight connect attempt
+ // instead of opening a second connection
+ if (!this.nc) await this.start()
+
+ const payload = JSON.stringify(event.toJSON())
+ this.nc.publish(this.subject, new TextEncoder().encode(payload))
+ }
+
+ // Alias for dispatch().
+ async emit(event: T): Promise {
+ return this.dispatch(event)
+ }
+
+ // Idempotent startup: all concurrent/repeated callers share one
+ // connect+subscribe attempt. A failed attempt clears the memo so a later
+ // call can retry.
+ async start(): Promise {
+ if (!this.start_promise) {
+ this.start_promise = this.connectAndSubscribe().catch((error: unknown) => {
+ this.start_promise = null // allow retry after a failed connect
+ throw error
+ })
+ }
+ return this.start_promise
+ }
+
+ // One-shot connect + subscribe; only ever invoked through the memoized start().
+ private async connectAndSubscribe(): Promise<void> {
+ if (this.running) return
+ if (!isNodeRuntime()) {
+ throw new Error('NATSEventBridge is only supported in Node.js runtimes')
+ }
+
+ const mod = await importOptionalDependency('NATSEventBridge', 'nats')
+ const connect = mod.connect
+ this.nc = await connect({ servers: this.server })
+ const sub = this.nc.subscribe(this.subject)
+
+ this.running = true
+ // drain the subscription in the background, forwarding each JSON payload
+ // onto the local inbound bus
+ this.sub_task = (async () => {
+ for await (const msg of sub) {
+ try {
+ const payload = JSON.parse(new TextDecoder().decode(msg.data))
+ await this.dispatchInboundPayload(payload)
+ } catch {
+ // Ignore malformed payloads.
+ }
+ }
+ })()
+ }
+
+ // Drain and close the connection, wait for the subscription loop to end,
+ // and destroy the inbound bus. Clears the start memo so start() can reconnect.
+ async close(): Promise {
+ this.running = false
+ this.start_promise = null
+ if (this.nc) {
+ await this.nc.drain()
+ await this.nc.close()
+ this.nc = null
+ }
+ await Promise.allSettled(this.sub_task ? [this.sub_task] : [])
+ this.sub_task = null
+ this.inbound_bus.destroy()
+ }
+
+ // Fire-and-forget start; startup errors are logged, not thrown. The memoized
+ // start_promise check prevents the double-connect race the old
+ // `if (this.running)` guard allowed (running is only set after the connect).
+ private ensureStarted(): void {
+ if (this.running || this.start_promise) return
+ void this.start().catch((error: unknown) => {
+ console.error('[bubus] NATSEventBridge failed to start', error)
+ })
+ }
+
+ // Rehydrate the serialized event, reset it to a fresh pending copy, and
+ // dispatch it on the local inbound bus.
+ private async dispatchInboundPayload(payload: unknown): Promise {
+ const event = BaseEvent.fromJSON(payload).reset()
+ this.inbound_bus.dispatch(event)
+ }
+}
diff --git a/bubus-ts/src/bridge_postgres.ts b/bubus-ts/src/bridge_postgres.ts
new file mode 100644
index 0000000..a9f8f9d
--- /dev/null
+++ b/bubus-ts/src/bridge_postgres.ts
@@ -0,0 +1,277 @@
+/**
+ * PostgreSQL LISTEN/NOTIFY + flat-table bridge for forwarding events.
+ */
+import { BaseEvent } from './base_event.js'
+import { EventBus } from './event_bus.js'
+import { assertOptionalDependencyAvailable, importOptionalDependency, isNodeRuntime } from './optional_deps.js'
+import type { EventClass, EventHandlerFunction, EventPattern, UntypedEventHandlerFunction } from './types.js'
+
+const randomSuffix = (): string => Math.random().toString(36).slice(2, 10)
+const IDENTIFIER_RE = /^[A-Za-z_][A-Za-z0-9_]*$/
+const DEFAULT_POSTGRES_TABLE = 'bubus_events'
+const DEFAULT_POSTGRES_CHANNEL = 'bubus_events'
+const EVENT_PAYLOAD_COLUMN = 'event_payload'
+
+const validateIdentifier = (value: string, label: string): string => {
+ if (!IDENTIFIER_RE.test(value)) {
+ throw new Error(`Invalid ${label}: ${JSON.stringify(value)}. Use only [A-Za-z0-9_] and start with a letter/_`)
+ }
+ return value
+}
+
+const indexName = (table: string, suffix: string): string => validateIdentifier(`${table}_${suffix}`.slice(0, 63), 'index name')
+
+const parseTableUrl = (table_url: string): { dsn: string; table: string } => {
+ let parsed: URL
+ try {
+ parsed = new URL(table_url)
+ } catch {
+ throw new Error(
+ 'PostgresEventBridge URL must include at least database in path, e.g. postgresql://user:pass@host:5432/dbname[/tablename]'
+ )
+ }
+
+ const segments = parsed.pathname.split('/').filter(Boolean)
+ if (segments.length < 1) {
+ throw new Error(
+ 'PostgresEventBridge URL must include at least database in path, e.g. postgresql://user:pass@host:5432/dbname[/tablename]'
+ )
+ }
+
+ const db_name = segments[0]
+ const table = segments.length >= 2 ? validateIdentifier(segments[1], 'table name') : DEFAULT_POSTGRES_TABLE
+ const dsn_url = new URL(parsed.toString())
+ dsn_url.pathname = `/${db_name}`
+ return { dsn: dsn_url.toString(), table }
+}
+
+const splitBridgePayload = (
+ payload: Record
+): { event_fields: Record; event_payload: Record } => {
+ const event_fields: Record = {}
+ const event_payload: Record = { ...payload }
+ for (const [key, value] of Object.entries(payload)) {
+ if (key.startsWith('event_')) {
+ event_fields[key] = value
+ }
+ }
+ return { event_fields, event_payload }
+}
+
+export class PostgresEventBridge {
+ readonly table_url: string
+ readonly dsn: string
+ readonly table: string
+ readonly channel: string
+ readonly name: string
+
+ private readonly inbound_bus: EventBus
+ private running: boolean
+ private client: any | null
+ private table_columns: Set
+ private notification_handler: ((msg: { channel: string; payload?: string }) => void) | null
+
+ constructor(table_url: string, channel?: string, name?: string) {
+ assertOptionalDependencyAvailable('PostgresEventBridge', 'pg')
+
+ const parsed = parseTableUrl(table_url)
+ this.table_url = table_url
+ this.dsn = parsed.dsn
+ this.table = parsed.table
+
+ const derived_channel = channel ?? DEFAULT_POSTGRES_CHANNEL
+ this.channel = validateIdentifier(derived_channel.slice(0, 63), 'channel name')
+ this.name = name ?? `PostgresEventBridge_${randomSuffix()}`
+
+ this.inbound_bus = new EventBus(this.name, { max_history_size: 0 })
+ this.running = false
+ this.client = null
+ this.table_columns = new Set(['event_id', 'event_created_at', 'event_type', EVENT_PAYLOAD_COLUMN])
+ this.notification_handler = null
+
+ this.dispatch = this.dispatch.bind(this)
+ this.emit = this.emit.bind(this)
+ this.on = this.on.bind(this)
+ }
+
+ on(event_pattern: EventClass, handler: EventHandlerFunction): void
+ on(event_pattern: string | '*', handler: UntypedEventHandlerFunction): void
+ on(event_pattern: EventPattern | '*', handler: EventHandlerFunction | UntypedEventHandlerFunction): void {
+ this.ensureStarted()
+ if (typeof event_pattern === 'string') {
+ this.inbound_bus.on(event_pattern, handler as UntypedEventHandlerFunction)
+ return
+ }
+ this.inbound_bus.on(event_pattern as EventClass, handler as EventHandlerFunction)
+ }
+
+ async dispatch(event: T): Promise {
+ this.ensureStarted()
+ if (!this.client) await this.start()
+
+ const payload = event.toJSON() as Record
+ const { event_fields, event_payload } = splitBridgePayload(payload)
+ const write_payload: Record = { ...event_fields, [EVENT_PAYLOAD_COLUMN]: event_payload }
+ const keys = Object.keys(write_payload).sort()
+ await this.ensureColumns(keys)
+
+ const columns_sql = keys.map((key) => `"${key}"`).join(', ')
+ const placeholders_sql = keys.map((_, index) => `$${index + 1}`).join(', ')
+ const values = keys.map((key) =>
+ write_payload[key] === null || write_payload[key] === undefined ? null : JSON.stringify(write_payload[key])
+ )
+
+ const update_fields = keys.filter((key) => key !== 'event_id')
+ let upsert_sql = `INSERT INTO "${this.table}" (${columns_sql}) VALUES (${placeholders_sql})`
+ if (update_fields.length > 0) {
+ const updates_sql = update_fields.map((key) => `"${key}" = EXCLUDED."${key}"`).join(', ')
+ upsert_sql += ` ON CONFLICT ("event_id") DO UPDATE SET ${updates_sql}`
+ } else {
+ upsert_sql += ' ON CONFLICT ("event_id") DO NOTHING'
+ }
+
+ await this.client.query(upsert_sql, values)
+ await this.client.query('SELECT pg_notify($1, $2)', [this.channel, JSON.stringify(String(event.event_id))])
+ }
+
+ async emit(event: T): Promise {
+ return this.dispatch(event)
+ }
+
+ async start(): Promise {
+ if (this.running) return
+ if (!isNodeRuntime()) {
+ throw new Error('PostgresEventBridge is only supported in Node.js runtimes')
+ }
+
+ const mod = await importOptionalDependency('PostgresEventBridge', 'pg')
+ const Client = mod.Client ?? mod.default?.Client
+ this.client = new Client({ connectionString: this.dsn })
+ this.client.on('error', () => {})
+ await this.client.connect()
+
+ await this.ensureTableExists()
+ await this.refreshColumnCache()
+ await this.ensureColumns(['event_id', 'event_created_at', 'event_type', EVENT_PAYLOAD_COLUMN])
+ await this.ensureBaseIndexes()
+
+ this.notification_handler = (msg: { channel: string; payload?: string }) => {
+ if (msg.channel !== this.channel || !msg.payload) return
+ void this.dispatchByEventId(msg.payload).catch(() => {
+ // Ignore transient shutdown races while closing connections.
+ })
+ }
+
+ this.client.on('notification', this.notification_handler)
+ await this.client.query(`LISTEN ${this.channel}`)
+ this.running = true
+ }
+
+ async close(): Promise {
+ this.running = false
+ if (this.client) {
+ try {
+ await this.client.query(`UNLISTEN ${this.channel}`)
+ } catch {
+ // ignore
+ }
+ if (this.notification_handler) {
+ this.client.off('notification', this.notification_handler)
+ this.notification_handler = null
+ }
+ await this.client.end()
+ this.client = null
+ }
+ this.inbound_bus.destroy()
+ }
+
+ private ensureStarted(): void {
+ if (this.running) return
+ void this.start().catch((error: unknown) => {
+ console.error('[bubus] PostgresEventBridge failed to start', error)
+ })
+ }
+
+ private async dispatchByEventId(event_id: string): Promise {
+ if (!this.running || !this.client) return
+ const result = await this.client.query(`SELECT * FROM "${this.table}" WHERE "event_id" = $1`, [event_id])
+ const row = result.rows?.[0] as Record | undefined
+ if (!row) return
+
+ const payload: Record = {}
+ const raw_event_payload = row[EVENT_PAYLOAD_COLUMN]
+ if (typeof raw_event_payload === 'string') {
+ try {
+ const decoded_event_payload = JSON.parse(raw_event_payload)
+ if (decoded_event_payload && typeof decoded_event_payload === 'object' && !Array.isArray(decoded_event_payload)) {
+ Object.assign(payload, decoded_event_payload as Record)
+ }
+ } catch {
+ // ignore malformed payload column
+ }
+ }
+
+ for (const [key, raw_value] of Object.entries(row)) {
+ if (key === EVENT_PAYLOAD_COLUMN || !key.startsWith('event_')) continue
+ if (raw_value === null || raw_value === undefined) continue
+ if (typeof raw_value !== 'string') {
+ payload[key] = raw_value
+ continue
+ }
+ try {
+ payload[key] = JSON.parse(raw_value)
+ } catch {
+ payload[key] = raw_value
+ }
+ }
+
+ await this.dispatchInboundPayload(payload)
+ }
+
+ private async dispatchInboundPayload(payload: unknown): Promise {
+ const event = BaseEvent.fromJSON(payload).reset()
+ this.inbound_bus.dispatch(event)
+ }
+
+ private async ensureTableExists(): Promise {
+ if (!this.client) return
+ await this.client.query(
+ `CREATE TABLE IF NOT EXISTS "${this.table}" ("event_id" TEXT PRIMARY KEY, "event_created_at" TEXT, "event_type" TEXT, "event_payload" TEXT)`
+ )
+ }
+
+ private async ensureBaseIndexes(): Promise {
+ if (!this.client) return
+
+ const event_created_at_idx = indexName(this.table, 'event_created_at_idx')
+ const event_type_idx = indexName(this.table, 'event_type_idx')
+
+ await this.client.query(`CREATE INDEX IF NOT EXISTS "${event_created_at_idx}" ON "${this.table}" ("event_created_at")`)
+ await this.client.query(`CREATE INDEX IF NOT EXISTS "${event_type_idx}" ON "${this.table}" ("event_type")`)
+ }
+
+ private async refreshColumnCache(): Promise {
+ if (!this.client) return
+ const result = await this.client.query(
+ `SELECT column_name FROM information_schema.columns WHERE table_schema = 'public' AND table_name = $1`,
+ [this.table]
+ )
+ this.table_columns = new Set((result.rows as Array<{ column_name: string }>).map((row) => row.column_name))
+ }
+
+ private async ensureColumns(keys: string[]): Promise {
+ if (!this.client) return
+ for (const key of keys) {
+ validateIdentifier(key, 'event field name')
+ if (key !== EVENT_PAYLOAD_COLUMN && !key.startsWith('event_')) {
+ throw new Error(`Invalid event field name for bridge column: ${JSON.stringify(key)}. Only event_* fields become columns`)
+ }
+ }
+
+ const missing = keys.filter((key) => !this.table_columns.has(key))
+ for (const key of missing) {
+ await this.client.query(`ALTER TABLE "${this.table}" ADD COLUMN IF NOT EXISTS "${key}" TEXT`)
+ this.table_columns.add(key)
+ }
+ }
+}
diff --git a/bubus-ts/src/bridge_redis.ts b/bubus-ts/src/bridge_redis.ts
new file mode 100644
index 0000000..275aad9
--- /dev/null
+++ b/bubus-ts/src/bridge_redis.ts
@@ -0,0 +1,194 @@
+/**
+ * Redis pub/sub bridge for forwarding events between runtimes.
+ *
+ * Usage:
+ * // channel from URL path
+ * const bridge = new RedisEventBridge('redis://user:pass@localhost:6379/1/my_channel')
+ *
+ * // explicit channel override
+ * const bridge2 = new RedisEventBridge('redis://user:pass@localhost:6379/1', 'my_channel')
+ *
+ * URL format:
 * redis://user:pass@host:6379/<db>/<channel>
+ */
+import { BaseEvent } from './base_event.js'
+import { EventBus } from './event_bus.js'
+import { assertOptionalDependencyAvailable, importOptionalDependency, isNodeRuntime } from './optional_deps.js'
+import type { EventClass, EventHandlerFunction, EventPattern, UntypedEventHandlerFunction } from './types.js'
+
+const randomSuffix = (): string => Math.random().toString(36).slice(2, 10)
+const DEFAULT_REDIS_CHANNEL = 'bubus_events'
+const DB_INIT_KEY = '__bubus:bridge_init__'
+
+const parseRedisUrl = (redis_url: string, channel?: string): { url: string; channel: string } => {
+ let parsed: URL
+ try {
+ parsed = new URL(redis_url)
+ } catch {
+ throw new Error(`RedisEventBridge URL must be a valid redis:// or rediss:// URL, got: ${redis_url}`)
+ }
+
+ const protocol = parsed.protocol.replace(/:$/, '').toLowerCase()
+ if (protocol !== 'redis' && protocol !== 'rediss') {
+ throw new Error(`RedisEventBridge URL must use redis:// or rediss://, got: ${redis_url}`)
+ }
+
+ const segments = parsed.pathname.split('/').filter(Boolean)
+ if (segments.length > 2) {
+ throw new Error(`RedisEventBridge URL path must be / or //, got: ${parsed.pathname || '/'}`)
+ }
+
+ let db_index = '0'
+ let channel_from_url: string | undefined
+
+ if (segments.length > 0) {
+ db_index = segments[0]
+ if (!/^\d+$/.test(db_index)) {
+ throw new Error(`RedisEventBridge URL db path segment must be numeric, got: ${JSON.stringify(db_index)} in ${redis_url}`)
+ }
+ if (segments.length === 2) {
+ channel_from_url = segments[1]
+ }
+ }
+
+ const resolved_channel = channel ?? channel_from_url ?? DEFAULT_REDIS_CHANNEL
+ if (!resolved_channel) {
+ throw new Error('RedisEventBridge channel must not be empty')
+ }
+
+ const normalized = new URL(parsed.toString())
+ normalized.pathname = `/${db_index}`
+ return { url: normalized.toString(), channel: resolved_channel }
+}
+
+export class RedisEventBridge {
+ readonly url: string
+ readonly channel: string
+ readonly name: string
+
+ private readonly inbound_bus: EventBus
+ private running: boolean
+ private start_promise: Promise | null
+ private redis_pub: any | null
+ private redis_sub: any | null
+
+ constructor(redis_url: string, channel?: string, name?: string) {
+ assertOptionalDependencyAvailable('RedisEventBridge', 'ioredis')
+
+ const parsed = parseRedisUrl(redis_url, channel)
+ this.url = parsed.url
+ this.channel = parsed.channel
+ this.name = name ?? `RedisEventBridge_${randomSuffix()}`
+ this.inbound_bus = new EventBus(this.name, { max_history_size: 0 })
+ this.running = false
+ this.start_promise = null
+ this.redis_pub = null
+ this.redis_sub = null
+
+ this.dispatch = this.dispatch.bind(this)
+ this.emit = this.emit.bind(this)
+ this.on = this.on.bind(this)
+ }
+
+ on(event_pattern: EventClass, handler: EventHandlerFunction): void
+ on(event_pattern: string | '*', handler: UntypedEventHandlerFunction): void
+ on(event_pattern: EventPattern | '*', handler: EventHandlerFunction | UntypedEventHandlerFunction): void {
+ this.ensureStarted()
+ if (typeof event_pattern === 'string') {
+ this.inbound_bus.on(event_pattern, handler as UntypedEventHandlerFunction)
+ return
+ }
+ this.inbound_bus.on(event_pattern as EventClass, handler as EventHandlerFunction)
+ }
+
+ async dispatch(event: T): Promise {
+ this.ensureStarted()
+ if (!this.redis_pub) await this.start()
+ const payload = JSON.stringify(event.toJSON())
+ await this.redis_pub.publish(this.channel, payload)
+ }
+
+ async emit(event: T): Promise