diff --git a/.claude/settings.local.json b/.claude/settings.local.json index f52b501..3cf27d5 100644 --- a/.claude/settings.local.json +++ b/.claude/settings.local.json @@ -20,7 +20,11 @@ "Bash(echo:*)", "Bash(grep:*)", "Bash(rg:*)", - "Bash(.venv/bin/pytest tests/test_typed_event_results.py::test_builtin_type_casting -v -s --timeout=10)" + "WebFetch(domain:github.com)", + "Bash(timeout 60 .venv/bin/pytest:*)", + "Bash(timeout 180 .venv/bin/pytest tests/ -v)", + "Bash(timeout 180 .venv/bin/pytest:*)", + "Bash(git tag:*)" ], "deny": [] } diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml new file mode 100644 index 0000000..30bcfcb --- /dev/null +++ b/.github/workflows/publish-npm.yml @@ -0,0 +1,52 @@ +name: publish-npm + +on: + release: + types: [published] + workflow_dispatch: + inputs: + tag: + description: npm dist-tag to publish under + required: false + default: latest + +permissions: + contents: read + id-token: write + +jobs: + publish_to_npm: + runs-on: ubuntu-latest + defaults: + run: + working-directory: bubus-ts + steps: + - uses: actions/checkout@v4 + + - uses: pnpm/action-setup@v4 + with: + version: 10 + + - uses: actions/setup-node@v4 + with: + node-version: 22 + cache: pnpm + cache-dependency-path: bubus-ts/pnpm-lock.yaml + registry-url: https://registry.npmjs.org + + - run: pnpm install --frozen-lockfile + - run: pnpm run typecheck + - run: pnpm test + - run: pnpm run build + + - name: Publish release tag + if: github.event_name == 'release' + run: pnpm publish --access public --no-git-checks + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + + - name: Publish manual tag + if: github.event_name == 'workflow_dispatch' + run: pnpm publish --access public --tag "${{ inputs.tag }}" --no-git-checks + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} diff --git a/.gitignore b/.gitignore index 6d5adec..8960285 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ CLAUDE.local.md # Build files dist/ +node_modules/ # 
Coverage files .coverage @@ -27,7 +28,7 @@ dist/ htmlcov/ coverage.xml *.cover - +*.sqlite* # Secrets and sensitive files secrets.env diff --git a/README.md b/README.md index afd7ed8..66316e9 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,35 @@ -# `bubus`: πŸ“’ Production-ready event bus library for Python +# `bubus`: πŸ“’ Production-ready multi-language event bus -Bubus is a fully-featured, Pydantic-powered event bus library for async Python. +image -It's designed for quickly building event-driven applications with Python in a way that "just works" with async support, proper support for nested events, and real concurrency control. +[![DeepWiki: Python](https://img.shields.io/badge/DeepWiki-bbus%2FPython-yellow.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAyCAYAAAAnWDnqAAAAAXNSR0IArs4c6QAAA05JREFUaEPtmUtyEzEQhtWTQyQLHNak2AB7ZnyXZMEjXMGeK/AIi+QuHrMnbChYY7MIh8g01fJoopFb0uhhEqqcbWTp06/uv1saEDv4O3n3dV60RfP947Mm9/SQc0ICFQgzfc4CYZoTPAswgSJCCUJUnAAoRHOAUOcATwbmVLWdGoH//PB8mnKqScAhsD0kYP3j/Yt5LPQe2KvcXmGvRHcDnpxfL2zOYJ1mFwrryWTz0advv1Ut4CJgf5uhDuDj5eUcAUoahrdY/56ebRWeraTjMt/00Sh3UDtjgHtQNHwcRGOC98BJEAEymycmYcWwOprTgcB6VZ5JK5TAJ+fXGLBm3FDAmn6oPPjR4rKCAoJCal2eAiQp2x0vxTPB3ALO2CRkwmDy5WohzBDwSEFKRwPbknEggCPB/imwrycgxX2NzoMCHhPkDwqYMr9tRcP5qNrMZHkVnOjRMWwLCcr8ohBVb1OMjxLwGCvjTikrsBOiA6fNyCrm8V1rP93iVPpwaE+gO0SsWmPiXB+jikdf6SizrT5qKasx5j8ABbHpFTx+vFXp9EnYQmLx02h1QTTrl6eDqxLnGjporxl3NL3agEvXdT0WmEost648sQOYAeJS9Q7bfUVoMGnjo4AZdUMQku50McDcMWcBPvr0SzbTAFDfvJqwLzgxwATnCgnp4wDl6Aa+Ax283gghmj+vj7feE2KBBRMW3FzOpLOADl0Isb5587h/U4gGvkt5v60Z1VLG8BhYjbzRwyQZemwAd6cCR5/XFWLYZRIMpX39AR0tjaGGiGzLVyhse5C9RKC6ai42ppWPKiBagOvaYk8lO7DajerabOZP46Lby5wKjw1HCRx7p9sVMOWGzb/vA1hwiWc6jm3MvQDTogQkiqIhJV0nBQBTU+3okKCFDy9WwferkHjtxib7t3xIUQtHxnIwtx4mpg26/HfwVNVDb4oI9RHmx5WGelRVlrtiw43zboCLaxv46AZeB3IlTkwouebTr1y2NjSpHz68WNFjHvupy3q8TFn3Hos2IAk4Ju5dCo8B3wP7VPr/FGaKiG+T+v+TQqIrOqMTL1VdWV1DdmcbO8KXBz6esmYWYKPwDL5b5FA1a0hwapHiom0r/cKaoqr+27/XcrS5UwSMbQAAAABJRU5ErkJggg==)](https://deepwiki.
com/pirate/bbus) ![PyPI - Version](https://img.shields.io/pypi/v/bubus) ![GitHub License](https://img.shields.io/github/license/pirate/bbus) ![GitHub last commit](https://img.shields.io/github/last-commit/pirate/bbus) -It provides a [pydantic](https://docs.pydantic.dev/latest/)-based API for implementing publish-subscribe patterns with type safety, async/sync handler support, and advanced features like event forwarding between buses. +[![DeepWiki: TS](https://img.shields.io/badge/DeepWiki-bbus%2FTypescript-blue.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAyCAYAAAAnWDnqAAAAAXNSR0IArs4c6QAAA05JREFUaEPtmUtyEzEQhtWTQyQLHNak2AB7ZnyXZMEjXMGeK/AIi+QuHrMnbChYY7MIh8g01fJoopFb0uhhEqqcbWTp06/uv1saEDv4O3n3dV60RfP947Mm9/SQc0ICFQgzfc4CYZoTPAswgSJCCUJUnAAoRHOAUOcATwbmVLWdGoH//PB8mnKqScAhsD0kYP3j/Yt5LPQe2KvcXmGvRHcDnpxfL2zOYJ1mFwrryWTz0advv1Ut4CJgf5uhDuDj5eUcAUoahrdY/56ebRWeraTjMt/00Sh3UDtjgHtQNHwcRGOC98BJEAEymycmYcWwOprTgcB6VZ5JK5TAJ+fXGLBm3FDAmn6oPPjR4rKCAoJCal2eAiQp2x0vxTPB3ALO2CRkwmDy5WohzBDwSEFKRwPbknEggCPB/imwrycgxX2NzoMCHhPkDwqYMr9tRcP5qNrMZHkVnOjRMWwLCcr8ohBVb1OMjxLwGCvjTikrsBOiA6fNyCrm8V1rP93iVPpwaE+gO0SsWmPiXB+jikdf6SizrT5qKasx5j8ABbHpFTx+vFXp9EnYQmLx02h1QTTrl6eDqxLnGjporxl3NL3agEvXdT0WmEost648sQOYAeJS9Q7bfUVoMGnjo4AZdUMQku50McDcMWcBPvr0SzbTAFDfvJqwLzgxwATnCgnp4wDl6Aa+Ax283gghmj+vj7feE2KBBRMW3FzOpLOADl0Isb5587h/U4gGvkt5v60Z1VLG8BhYjbzRwyQZemwAd6cCR5/XFWLYZRIMpX39AR0tjaGGiGzLVyhse5C9RKC6ai42ppWPKiBagOvaYk8lO7DajerabOZP46Lby5wKjw1HCRx7p9sVMOWGzb/vA1hwiWc6jm3MvQDTogQkiqIhJV0nBQBTU+3okKCFDy9WwferkHjtxib7t3xIUQtHxnIwtx4mpg26/HfwVNVDb4oI9RHmx5WGelRVlrtiw43zboCLaxv46AZeB3IlTkwouebTr1y2NjSpHz68WNFjHvupy3q8TFn3Hos2IAk4Ju5dCo8B3wP7VPr/FGaKiG+T+v+TQqIrOqMTL1VdWV1DdmcbO8KXBz6esmYWYKPwDL5b5FA1a0hwapHiom0r/cKaoqr+27/XcrS5UwSMbQAAAABJRU5ErkJggg==)](https://deepwiki.com/pirate/bbus/3-typescript-implementation) ![NPM Version](https://img.shields.io/npm/v/bubus) -♾️ It's inspired by the simplicity of async and events in `JS`, we aim to bring a fully type-checked 
[`EventTarget`](https://developer.mozilla.org/en-US/docs/Web/API/EventTarget)-style API to Python. +Bubus is an in-memory event bus library for async Python and TS (node/browser). + +It's designed for quickly building resilient, predictable, complex event-driven apps. + +It "just works" with an intuitive, but powerful event JSON format + dispatch API that's consistent across both languages and scales consistently from one event up to millions: +```python +bus.on(SomeEvent, some_function) +bus.emit(SomeEvent(some_data=132)) +``` + +It's async native, has proper automatic nested event tracking, and powerful concurrency control options. The API is inspired by `EventEmitter` or [`emittery`](https://github.com/sindresorhus/emittery) in JS, but it takes it a step further: + +- nice Pydantic / Zod schemas for events that can be exchanged between both languages +- automatic UUIDv7s and monotonic nanosecond timestamps for ordering events globally +- built-in locking options to force strict global FIFO processing or fully parallel processing + +--- + +♾️ It's inspired by the simplicity of async and events in `JS` but with baked-in features that eliminate most of the tedious repetitive complexity in event-driven codebases: + +- correct timeout enforcement across multiple levels of events: if a parent times out it correctly aborts all child event processing +- ability to strongly type hint and enforce the return type of event handlers at compile-time +- ability to queue events on the bus, or inline await them for immediate execution like a normal function call +- handles ~5,000 events/sec/core in both languages, with ~2kb of RAM consumed per event during active processing
@@ -15,7 +38,7 @@ It provides a [pydantic](https://docs.pydantic.dev/latest/)-based API for implem Install bubus and get started with a simple event-driven application: ```bash -pip install bubus +pip install bubus # see ./bubus-ts/README.md for JS instructions ``` ```python @@ -29,7 +52,7 @@ class UserLoginEvent(BaseEvent[str]): async def handle_login(event: UserLoginEvent) -> str: auth_request = await event.event_bus.dispatch(AuthRequestEvent(...)) # nested events supported - auth_response = await event.event_bus.expect(AuthResponseEvent, timeout=30.0) + auth_response = await event.event_bus.find(AuthResponseEvent, child_of=auth_request, future=30) return f"User {event.username} logged in admin={event.is_admin} with API response: {await auth_response.event_result()}" bus = EventBus() @@ -104,9 +127,9 @@ class SomeService: return 'this works too' # All usage patterns behave the same: -bus.on(SomeEvent, SomeClass().handlers_can_be_methods) -bus.on(SomeEvent, SomeClass.handler_can_be_classmethods) -bus.on(SomeEvent, SomeClass.handlers_can_be_staticmethods) +bus.on(SomeEvent, SomeService().handlers_can_be_methods) +bus.on(SomeEvent, SomeService.handler_can_be_classmethods) +bus.on(SomeEvent, SomeService.handlers_can_be_staticmethods) ```
@@ -181,6 +204,7 @@ bus.on(GetConfigEvent, load_user_config) bus.on(GetConfigEvent, load_system_config) # Get a merger of all dict results +# (conflicting keys raise ValueError unless raise_if_conflicts=False) event = await bus.dispatch(GetConfigEvent()) config = await event.event_results_flat_dict(raise_if_conflicts=False) # {'debug': False, 'port': 8080, 'timeout': 30} @@ -270,47 +294,78 @@ if __name__ == '__main__':

-### ⏳ Expect an Event to be Dispatched +### πŸ”Ž Find Events in History or Wait for Future Events -Wait for specific events to be seen on a bus with optional filtering: +The `find()` method provides a unified way to search past event history and/or wait for future events. It's the recommended approach for most event lookup scenarios. -```python -# Block until a specific event is seen (with optional timeout) -request_event = await bus.dispatch(RequestEvent(id=123, table='invoices', request_id=999234)) -response_event = await bus.expect(ResponseEvent, timeout=30) -``` +The `past` and `future` parameters accept either `bool` or `float` values: -A more complex real-world example showing off all the features: +| Value | `past` meaning | `future` meaning | +|-------|----------------|------------------| +| `True` | Search all history | Wait forever | +| `False` | Skip history search | Don't wait | +| `5.0` | Search last 5 seconds | Wait up to 5 seconds | ```python -async def on_generate_invoice_pdf(event: GenerateInvoiceEvent) -> pdf: - request_event = await bus.dispatch(APIRequestEvent( # example: fire a backend request via some RPC client using bubus - method='invoices.generatePdf', - invoice_id=event.invoice_id, - request_id=uuid4(), - )) - # ...rpc client should send the request, then call event_bus.dispatch(APIResponseEvent(...)) when it gets a response ... 
+# Search all history, wait up to 5s for future +event = await bus.find(ResponseEvent, past=True, future=5) + +# Search last 5s of history, wait forever +event = await bus.find(ResponseEvent, past=5, future=True) + +# Search last 5s of history, wait up to 5s +event = await bus.find(ResponseEvent, past=5, future=5) + +# Search all history only, don't wait (instant) +event = await bus.find(ResponseEvent, past=True, future=False) - # wait for the response event to be fired by the RPC client - is_our_response = lambda response_event: response_event.request_id == request_event.request_id - is_succesful = lambda response_event: response_event.invoice_id == event.invoice_id and response_event.invoice_url - try: - response_event: APIResponseEvent = await bus.expect( - APIResponseEvent, # wait for events of this type (also accepts str name) - include=lambda e: is_our_response(e) and is_succesful(e), # only include events that match a certain filter func - exclude=lambda e: e.status != 'retrying', # optionally exclude certain events, overrides include - timeout=30, # raises asyncio.TimeoutError if no match is seen within 30sec - ) - except TimeoutError: - await bus.dispatch(TimedOutError(msg='timed out while waiting for response from server', request_id=request_event.id)) +# Wait up to 5s for future only (like expect()) +event = await bus.find(ResponseEvent, past=False, future=5) - return response_event.invoice_url +# With custom filter +event = await bus.find(ResponseEvent, where=lambda e: e.request_id == my_id, future=5) +``` + +#### Finding Child Events + +When you dispatch an event that triggers child events, use `child_of` to find specific descendants: + +```python +# Dispatch a parent event that triggers child events +nav_event = await bus.dispatch(NavigateToUrlEvent(url="https://example.com")) -event_bus.on(GenerateInvoiceEvent, on_generate_invoice_pdf) +# Find a child event (already fired while NavigateToUrlEvent was being handled) +new_tab = await 
bus.find(TabCreatedEvent, child_of=nav_event, past=5) +if new_tab: + print(f"New tab created: {new_tab.tab_id}") ``` +This solves race conditions where child events fire before you start waiting for them. + > [!IMPORTANT] -> `expect()` resolves when the event is first *dispatched* to the `EventBus`, not when it completes. `await response_event` to get the completed event. +> `find()` resolves when the event is first *dispatched* to the `EventBus`, not when it completes. Use `await event` to wait for handlers to finish. +> If no match is found (or future timeout elapses), `find()` returns `None`. + +
+ +### πŸ” Event Debouncing + +Avoid re-running expensive work by reusing recent events. The `find()` method makes debouncing simple: + +```python +# Simple debouncing: reuse event from last 10 seconds, or dispatch new +event = ( + bus.find(ScreenshotEvent, past=10, future=False) # Check last 10s of history (instant) + or await bus.dispatch(ScreenshotEvent()) +) + +# Advanced: check history, wait briefly for new event to appear, fallback to dispatch new event +event = ( + await bus.find(SyncEvent, past=True, future=False) # Check all history (instant) + or await bus.find(SyncEvent, past=False, future=5) # Wait up to 5s for in-flight + or await bus.dispatch(SyncEvent()) # Fallback: dispatch new +) +```
@@ -412,12 +467,75 @@ email_list = await event_bus.dispatch(FetchInboxEvent(account_id='124', ...)).ev
+### 🧡 ContextVar Propagation + +ContextVars set before `dispatch()` are automatically propagated to event handlers. This is essential for request-scoped context like request IDs, user sessions, or tracing spans: + +```python +from contextvars import ContextVar + +# Define your context variables +request_id: ContextVar[str] = ContextVar('request_id', default='') +user_id: ContextVar[str] = ContextVar('user_id', default='') + +async def handler(event: MyEvent) -> str: + # Handler sees the context values that were set before dispatch() + print(f"Request: {request_id.get()}, User: {user_id.get()}") + return "done" + +bus.on(MyEvent, handler) + +# Set context before dispatch (e.g., in FastAPI middleware) +request_id.set('req-12345') +user_id.set('user-abc') + +# Handler will see request_id='req-12345' and user_id='user-abc' +await bus.dispatch(MyEvent()) +``` + +**Context propagates through nested handlers:** + +```python +async def parent_handler(event: ParentEvent) -> str: + # Context is captured at dispatch time + print(f"Parent sees: {request_id.get()}") # 'req-12345' + + # Child events inherit the same context + await bus.dispatch(ChildEvent()) + return "parent_done" + +async def child_handler(event: ChildEvent) -> str: + # Child also sees the original dispatch context + print(f"Child sees: {request_id.get()}") # 'req-12345' + return "child_done" +``` + +**Context isolation between dispatches:** + +Each dispatch captures its own context snapshot. Concurrent dispatches with different context values are properly isolated: + +```python +request_id.set('req-A') +event_a = bus.dispatch(MyEvent()) # Handler A sees 'req-A' + +request_id.set('req-B') +event_b = bus.dispatch(MyEvent()) # Handler B sees 'req-B' + +await event_a # Still sees 'req-A' +await event_b # Still sees 'req-B' +``` + +> [!NOTE] +> Context is captured at `dispatch()` time, not when the handler executes. 
This ensures handlers see the context from the call site, even if the event is processed later from a queue. + +
+ ### 🧹 Memory Management EventBus includes automatic memory management to prevent unbounded growth in long-running applications: ```python -# Create a bus with memory limits (default: 50 events) +# Create a bus with memory limits (default: 100 events) bus = EventBus(max_history_size=100) # Keep max 100 events in history # Or disable memory limits for unlimited history @@ -477,11 +595,25 @@ await bus.dispatch(DataEvent()) Persist events automatically to a `jsonl` file for future replay and debugging: ```python +from pathlib import Path + +from bubus import EventBus, SQLiteHistoryMirrorMiddleware +from bubus.middlewares import LoggerEventBusMiddleware, WALEventBusMiddleware + # Enable WAL event log persistence (optional) -bus = EventBus(name='MyBus', wal_path='./events.jsonl') +bus = EventBus( + name='MyBus', + middlewares=[ + SQLiteHistoryMirrorMiddleware('./events.sqlite'), + WALEventBusMiddleware('./events.jsonl'), + LoggerEventBusMiddleware('./events.log'), + ], +) + +# LoggerEventBusMiddleware defaults to stdout-only logging if no file path is provided # All completed events are automatically appended as JSON lines to the end -bus.dispatch(SecondEventAbc(some_key="banana")) +await bus.dispatch(SecondEventAbc(some_key="banana")) ``` `./events.jsonl`: @@ -507,17 +639,43 @@ The main event bus class that manages event processing and handler execution. 
```python EventBus( name: str | None = None, - wal_path: Path | str | None = None, parallel_handlers: bool = False, - max_history_size: int | None = 50 + max_history_size: int | None = 50, + middlewares: Sequence[EventBusMiddleware | type[EventBusMiddleware]] | None = None, ) ``` **Parameters:** - `name`: Optional unique name for the bus (auto-generated if not provided) -- `wal_path`: Path for write-ahead logging of events to a `jsonl` file (optional) - `parallel_handlers`: If `True`, handlers run concurrently for each event, otherwise serially if `False` (the default) +- `middlewares`: Optional list of `EventBusMiddleware` subclasses or instances that hook into handler execution for analytics, logging, retries, etc. + +Handler middlewares subclass `EventBusMiddleware` and override whichever lifecycle hooks they need: + +```python +from bubus.middlewares import EventBusMiddleware + +class AnalyticsMiddleware(EventBusMiddleware): + async def process_handler_start(self, eventbus, event, event_result): + await analytics_bus.dispatch(HandlerStartedAnalyticsEvent(event_id=event_result.event_id)) + + async def process_handler_end(self, eventbus, event, event_result): + await analytics_bus.dispatch(HandlerCompletedAnalyticsEvent(event_id=event_result.event_id)) + + async def process_handler_exception(self, eventbus, event, event_result, error): + await analytics_bus.dispatch(HandlerCompletedAnalyticsEvent(event_id=event_result.event_id, error=error)) +``` + +Middlewares can observe or mutate the `EventResult` at each step, dispatch additional events, or trigger other side effects (metrics, retries, auth checks, etc.). 
+ +Pair that with the built-in `SQLiteHistoryMirrorMiddleware` to mirror every event and handler transition into append-only `events_log` and `event_results_log` tables, making it easy to inspect or audit the bus state: + +```python +from bubus import EventBus, SQLiteHistoryMirrorMiddleware + +bus = EventBus(middlewares=[SQLiteHistoryMirrorMiddleware('./events.sqlite')]) +``` - `max_history_size`: Maximum number of events to keep in history (default: 50, None = unlimited) #### `EventBus` Properties @@ -554,9 +712,77 @@ result = await event # await the pending Event to get the completed Event **Note:** When `max_history_size` is set, EventBus enforces a hard limit of 100 pending events (queue + processing) to prevent runaway memory usage. Dispatch will raise `RuntimeError` if this limit is exceeded. -##### `expect(event_type: str | Type[BaseEvent], timeout: float | None=None, predicate: Callable[[BaseEvent], bool]=None) -> BaseEvent` +##### `query(event_type: str | Type[BaseEvent], *, include: Callable[[BaseEvent], bool] | None=None, exclude: Callable[[BaseEvent], bool] | None=None, since: timedelta | float | int | None=None) -> BaseEvent | None` + +Return the most recently completed event in history that matches the type and optional predicates. Returns `None` if nothing qualifies. + +```python +recent_sync = await bus.query( + SyncEvent, + since=timedelta(seconds=30), + include=lambda e: e.account_id == account_id, +) + +if recent_sync is not None: + print('We already synced recently, skipping') +``` + +##### `find(event_type: str | Type[BaseEvent], *, where: Callable[[BaseEvent], bool]=None, child_of: BaseEvent | None=None, past: bool | float=True, future: bool | float=True) -> BaseEvent | None` + +Find an event matching criteria in history and/or future. This is the recommended unified method for event lookup. 
+ +**Parameters:** + +- `event_type`: The event type string or model class to find +- `where`: Predicate function for filtering (default: matches all) +- `child_of`: Only match events that are descendants of this parent event +- `past`: Controls history search behavior (default: `True`) + - `True`: search all history + - `False`: skip history search + - `float`: search events from last N seconds only +- `future`: Controls future wait behavior (default: `True`) + - `True`: wait forever for matching event + - `False`: don't wait for future events + - `float`: wait up to N seconds for matching event + +```python +# Search all history, wait up to 5s for future +event = await bus.find(ResponseEvent, past=True, future=5) + +# Search last 5s of history, wait forever +event = await bus.find(ResponseEvent, past=5, future=True) + +# Search last 5s of history, wait up to 5s +event = await bus.find(ResponseEvent, past=5, future=5) + +# Search all history only, don't wait (instant) +event = await bus.find(ResponseEvent, past=True, future=False) -Wait for a specific event to occur. +# Wait up to 5s for future only (ignore history) +event = await bus.find(ResponseEvent, past=False, future=5) + +# Find child of a specific parent event +child = await bus.find(ChildEvent, child_of=parent_event, future=5) + +# With custom filter +event = await bus.find(ResponseEvent, where=lambda e: e.status == 'success', future=5) +``` + +##### `expect(event_type: str | Type[BaseEvent], *, include: Callable=None, exclude: Callable=None, timeout: float | None=None, past: bool | float=False, child_of: BaseEvent | None=None) -> BaseEvent | None` + +Wait for a specific event to occur. This is a backwards-compatible wrapper around `find()`. 
+ +**Parameters:** + +- `event_type`: The event type string or model class to wait for +- `include`: Filter function that must return `True` for the event to match +- `exclude`: Filter function that must return `False` for the event to match +- `timeout`: Maximum time to wait in seconds (None = wait forever). Maps to `future` parameter of `find()`. +- `past`: Controls history search behavior (default: `False`) + - `True`: search all history first + - `False`: skip history search + - `float`: search events from last N seconds +- `child_of`: Only match events that are descendants of this parent event ```python # Wait for any UserEvent @@ -565,8 +791,39 @@ event = await bus.expect('UserEvent', timeout=30) # Wait with custom filter event = await bus.expect( 'UserEvent', - predicate=lambda e: e.user_id == 'specific_user' + include=lambda e: e.user_id == 'specific_user', + timeout=30, ) + +# Search history first, then wait +event = await bus.expect('UserEvent', past=True, timeout=30) + +# Search last 10 seconds of history, then wait +event = await bus.expect('UserEvent', past=10, timeout=30) + +# Find child event +child = await bus.expect(ChildEvent, child_of=parent_event, timeout=5) + +if event is None: + print('No matching event arrived within 30 seconds') +``` + +##### `event_is_child_of(event: BaseEvent, ancestor: BaseEvent) -> bool` + +Check if event is a descendant of ancestor (child, grandchild, etc.). + +```python +if bus.event_is_child_of(child_event, parent_event): + print("child_event is a descendant of parent_event") +``` + +##### `event_is_parent_of(event: BaseEvent, descendant: BaseEvent) -> bool` + +Check if event is an ancestor of descendant (parent, grandparent, etc.). 
+ +```python +if bus.event_is_parent_of(parent_event, child_event): + print("parent_event is an ancestor of child_event") ``` ##### `wait_until_idle(timeout: float | None=None)` @@ -606,7 +863,7 @@ class BaseEvent(BaseModel, Generic[T_EventResultType]): # Framework-managed fields event_type: str # Defaults to class name event_id: str # Unique UUID7 identifier, auto-generated if not provided - event_timeout: float = 60.0 # Maximum execution in seconds for each handler + event_timeout: float = 300.0 # Maximum execution in seconds for each handler event_schema: str # Module.Class@version (auto-set based on class & LIBRARY_VERSION env var) event_parent_id: str # Parent event ID (auto-set) event_path: list[str] # List of bus names traversed (auto-set) @@ -626,7 +883,7 @@ class BaseEvent(BaseModel, Generic[T_EventResultType]): #### `BaseEvent` Properties -- `event_status`: `Literal['pending', 'started', 'complete']` Event status +- `event_status`: `Literal['pending', 'started', 'completed']` Event status - `event_started_at`: `datetime` When first handler started processing - `event_completed_at`: `datetime` When all handlers completed processing - `event_children`: `list[BaseEvent]` Get any child events emitted during handling of this event @@ -762,6 +1019,17 @@ long_lists = await event.event_results_flat_list(include=lambda r: isinstance(r. all_items = await event.event_results_flat_list(raise_if_any=False, raise_if_none=False) ``` +##### `event_create_pending_results(handlers: dict[str, EventHandler], eventbus: EventBus | None = None, timeout: float | None = None) -> dict[str, EventResult]` + +Create (or reset) the `EventResult` placeholders for the provided handlers. The `EventBus` uses this internally before it begins executing handlers so that the event's state is immediately visible. Advanced users can call it when coordinating handler execution manually. 
+ +```python +applicable_handlers = bus._get_applicable_handlers(event) # internal helper shown for illustration +pending_results = event.event_create_pending_results(applicable_handlers, eventbus=bus) + +assert all(result.status == 'pending' for result in pending_results.values()) +``` + ##### `event_bus` (property) Shortcut to get the `EventBus` that is currently processing this event. Can be used to avoid having to pass an `EventBus` instance to your handlers. @@ -785,7 +1053,7 @@ async def some_handler(event: MyEvent): The placeholder object that represents the pending result from a single handler executing an event. `Event.event_results` contains a `dict[PythonIdStr, EventResult]` in the shape of `{handler_id: EventResult()}`. -You shouldn't need to ever directly use this class, it's an internal wrapper to track pending and completed results from each handler within `BaseEvent.event_results`. +You generally won't interact with this class directlyβ€”the bus instantiates and updates it for youβ€”but its API is documented here for advanced integrations and custom dispatch loops. 
#### `EventResult` Fields @@ -799,12 +1067,12 @@ class EventResult(BaseModel): status: str # 'pending', 'started', 'completed', 'error' result: Any # Handler return value - error: str | None # Error message if failed + error: BaseException | None # Captured exception if the handler failed started_at: datetime # When handler started completed_at: datetime # When handler completed timeout: float # Handler timeout in seconds - child_events: list[BaseEvent] # list of child events emitted during handler execution + event_children: list[BaseEvent] # child events emitted during handler execution ``` #### `EventResult` Methods @@ -818,6 +1086,9 @@ handler_result = event.event_results['handler_id'] value = await handler_result # Returns result or raises an exception if handler hits an error ``` +- `execute(event, handler, *, eventbus, timeout, enter_handler_context, exit_handler_context, format_exception_for_log)` + Low-level helper that runs the handler, updates timing/status fields, captures errors, and notifies its completion signal. `EventBus.execute_handler()` delegates to this; you generally only need it when building a custom bus or integrating the event system into another dispatcher. 
+ --- ## 🧡 Advanced Concurrency Control @@ -966,6 +1237,7 @@ uv run pytest tests/test_eventbus.py - https://www.cosmicpython.com/book/chapter_08_events_and_message_bus.html#message_bus_diagram ⭐️ - https://developer.mozilla.org/en-US/docs/Web/API/EventTarget ⭐️ +- https://github.com/sindresorhus/emittery ⭐️ (equivalent for JS), https://github.com/EventEmitter2/EventEmitter2, https://github.com/vitaly-t/sub-events - https://github.com/pytest-dev/pluggy ⭐️ - https://github.com/teamhide/fastapi-event ⭐️ - https://github.com/ethereum/lahja ⭐️ diff --git a/bubus-ts/.prettierignore b/bubus-ts/.prettierignore new file mode 100644 index 0000000..849ddff --- /dev/null +++ b/bubus-ts/.prettierignore @@ -0,0 +1 @@ +dist/ diff --git a/bubus-ts/README.md b/bubus-ts/README.md new file mode 100644 index 0000000..95cc54b --- /dev/null +++ b/bubus-ts/README.md @@ -0,0 +1,558 @@ +# bubus-ts: Python vs JS Differences (and the tricky parts) + +This README only covers the differences between the Python implementation and this TypeScript port, plus the +gotchas we uncovered while matching behavior. It intentionally does **not** re-document the full TS API surface. + +## Key Differences vs Python + +### 1) Awaiting events: `event.done()` instead of `await event` + +- Python: `await event` waits for handlers and can jump the queue when awaited inside a handler. +- TS: use `await event.done()` for the same behavior. +- Outside a handler, `done()` just waits for completion (it does not jump the queue). +- Inside a handler, `done()` triggers immediate processing (queue jump) on **all buses** where the event is queued. + +### 2) Cross-bus queue jump (forwarding) + +- Python uses a global re-entrant lock to let awaited events process immediately on every bus where they appear. +- TS optionally uses `AsyncLocalStorage` on Node.js (auto-detected) to capture dispatch context, but falls back gracefully in browsers. 
+- `EventBus._all_instances` + the `LockManager` pause mechanism pauses each runloop and processes the same event immediately across buses. + +### 3) `event.bus` is a BusScopedEvent view + +- In Python, `event.event_bus` is dynamic (contextvars). +- In TS, `event.bus` is provided by a **BusScopedEvent** (a Proxy over the original event). +- That proxy injects a bus-bound `emit/dispatch` to ensure correct parent/child tracking. + +### 4) Monotonic timestamps + +- JS `Date.now()` is not strictly monotonic at millisecond granularity. +- To keep FIFO tests stable, we generate strictly increasing timestamps via `BaseEvent.nextTimestamp()` (returns `{ date, isostring, ts }`). + +### 5) No middleware, no WAL, no SQLite mirrors + +- Those Python features were intentionally dropped for the JS version. + +### 6) Default timeouts come from the EventBus + +- `BaseEvent.event_timeout` defaults to `null`. +- When dispatched, `EventBus` applies its default `event_timeout` (60s unless configured). +- You can set `{ event_timeout: null }` on the bus to disable timeouts entirely. +- Slow handler warnings fire after `event_handler_slow_timeout` (default: `30s`). Slow event warnings fire after `event_slow_timeout` (default: `300s`). + +## EventBus Options + +All options are passed to `new EventBus(name, options)`. + +- `max_history_size?: number | null` (default: `100`) + - Max number of events kept in history. Set to `null` for unlimited history. +- `event_concurrency?: "global-serial" | "bus-serial" | "parallel" | "auto"` (default: `"bus-serial"`) + - Controls how many **events** can be processed at a time. + - `"global-serial"` enforces FIFO across all buses. + - `"bus-serial"` enforces FIFO per bus, allows cross-bus overlap. + - `"parallel"` allows events to process concurrently. + - `"auto"` uses the bus default (mostly useful for overrides). 
+- `event_handler_concurrency?: "global-serial" | "bus-serial" | "parallel" | "auto"` (default: `"bus-serial"`) + - Controls how many **handlers** run at once for each event. + - Same semantics as `event_concurrency`, but applied to handler execution. +- `event_timeout?: number | null` (default: `60`) + - Default handler timeout in seconds, applied when `event.event_timeout` is `null`. + - Set to `null` to disable timeouts globally for the bus. +- `event_handler_slow_timeout?: number | null` (default: `30`) + - Warn after this many seconds for slow handlers. + - Only warns when the handler's timeout is `null` or greater than this value. + - Set to `null` to disable slow handler warnings. +- `event_slow_timeout?: number | null` (default: `300`) + - Warn after this many seconds for slow event processing. + - Set to `null` to disable slow event warnings. + +## Concurrency Overrides and Precedence + +You can override concurrency per event and per handler: + +```ts +const FastEvent = BaseEvent.extend('FastEvent', { + payload: z.string(), +}) + +// Per-event override (highest precedence) +const event = FastEvent({ + payload: 'x', + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', +}) + +// Per-handler override (lower precedence) +bus.on(FastEvent, handler, { event_handler_concurrency: 'parallel' }) +``` + +Precedence order (highest β†’ lowest): + +1. Event instance overrides (`event_concurrency`, `event_handler_concurrency`) +2. Handler options (`event_handler_concurrency`) +3. Bus defaults (`event_concurrency`, `event_handler_concurrency`) + +`"auto"` resolves to the bus default. + +## Handler Options + +Handlers can be configured at registration time: + +```ts +bus.on(SomeEvent, handler, { + event_handler_concurrency: 'parallel', + handler_timeout: 10, // per-handler timeout in seconds +}) +``` + +- `event_handler_concurrency` allows per-handler concurrency overrides. 
+- `handler_timeout` sets a per-handler timeout in seconds (overrides the bus default when lower). + +## TypeScript Return Type Enforcement (Edge Cases) + +TypeScript can only enforce handler return types when the event type is inferable at compile time. + +- `bus.on(EventFactoryOrClass, handler)`: + - Return values are type-checked against the event's `event_result_schema` (if defined). + - `undefined` (or no return) is always allowed. +- `bus.on('SomeEventName', handler)`: + - Return type checking is best-effort only (treated as unknown in typing). + - Use class/factory keys when you want compile-time return-shape enforcement. +- `bus.on('*', handler)`: + - Return type checking is intentionally loose (best-effort only), because wildcard handlers may receive many event types, including forwarded events from other buses. + - In practice, wildcard handlers are expected to be side-effect/forwarding handlers and usually return `undefined`. + +Runtime behavior is still consistent across all key styles: + +- If an event has `event_result_schema` and a handler returns a non-`undefined` value, that value is validated at runtime. +- If the handler returns `undefined`, schema validation is skipped and the result is accepted. + +## Throughput + Memory Behavior (Current) + +This section documents the current runtime profile and the important edge cases. It is intentionally conservative: +we describe what is enforced today, not theoretical best-case behavior. + +### Throughput model + +- Baseline throughput in tests is gated at `<30s` for: + - `50k events within reasonable time` + - `50k events with ephemeral on/off handler registration across 2 buses` + - `500 ephemeral buses with 100 events each` +- The major hot-path operations are linear in collection sizes: + - Per event, handler matching is `O(total handlers on bus)` (`exact` scan + `*` scan). + - `.off()` is `O(total handlers on bus)` for matching/removal. 
+ - Queue-jump (`await event.done()` inside handlers) does cross-bus discovery by walking `event_path` and iterating `EventBus._all_instances`, so cost grows with buses and forwarding depth. +- `waitUntilIdle()` is best used at batch boundaries, not per event: + - Idle checks call `isIdle()`, which scans `event_history` and handler results. + - There is a fast-path that skips idle scans when no idle waiters exist, which keeps normal dispatch/complete flows fast even with large history. +- Concurrency settings are a direct throughput limiter: + - `global-serial` and `bus-serial` intentionally serialize work. + - `parallel` increases throughput but can increase transient memory if producers outpace consumers. + +### Memory model + +- Per bus, strong references are held for: + - `handlers` + - `pending_event_queue` + - `in_flight_event_ids` + - `event_history` (bounded by `max_history_size`, or unbounded if `null`) + - active `find()` waiters until match/timeout +- Per event, retained state includes: + - `event_results` (per-handler result objects) + - descendant links in `event_results[].event_children` +- History trimming behavior: + - Completed events are evicted first (oldest first). + - If still over limit, oldest remaining events are dropped even if pending, and a warning is logged. + - Eviction calls `event._gc()` to clear internal references (`event_results`, child arrays, bus/context pointers). +- Memory is not strictly bounded by only `pending_queue_size + max_history_size`: + - A retained parent event can hold references to many children/grandchildren via `event_children`. + - So effective retained memory can exceed a simple `event_count * avg_event_size` bound in high fan-out trees. +- `destroy()` is recommended for deterministic cleanup, but not required for GC safety: + - `_all_instances` is WeakRef-based, so unreferenced buses can be collected without calling `.destroy()`. 
+ - There is a GC regression test for this (`unreferenced buses with event history are garbage collected without destroy()`). +- `heapUsed` vs `rss`: + - `heapUsed` returning near baseline after GC is the primary leak signal in tests. + - `rss` can stay elevated due to V8 allocator high-water behavior and is not, by itself, a proof of leak. + +### Practical guidance for high-load deployments + +- Keep `max_history_size` finite in production. +- Avoid very large wildcard handler sets on hot event types. +- Avoid calling `waitUntilIdle()` for every single event in large streams; prefer periodic/batch waits. +- Be aware that very deep/high-fan-out parent-child graphs increase retained memory until parent events are evicted. +- Use `.destroy()` for explicit lifecycle control in request-scoped or short-lived bus patterns. + +## Semaphores (how concurrency is enforced) + +We use four semaphores: + +- `LockManager.global_event_semaphore` +- `LockManager.global_handler_semaphore` +- `bus.locks.bus_event_semaphore` +- `bus.locks.bus_handler_semaphore` + +They are applied centrally when scheduling events and handlers, so concurrency is controlled without scattering +mutex checks throughout the code. + +## Full lifecycle across concurrency modes + +Below is the complete execution flow for nested events, including forwarding across buses, and how it behaves +under different `event_concurrency` / `event_handler_concurrency` configurations. + +### 1) Base execution flow (applies to all modes) + +**Dispatch (non-awaited):** + +1. `dispatch()` normalizes to `original_event`, sets `bus` if missing. +2. Captures `_dispatch_context` (AsyncLocalStorage if available). +3. Applies `event_timeout_default` if `event.event_timeout === null`. +4. If this bus is already in `event_path` (or `bus.hasProcessedEvent()`), return a BusScopedEvent without queueing. +5. Append bus name to `event_path`, record child relationship (if `event_parent_id` is set). +6. 
Add to `event_history` (a `Map` keyed by event id). +7. Increment `event_pending_bus_count`. +8. Push to `pending_event_queue` and `startRunloop()`. + +**Runloop + processing:** + +1. `runloop()` drains `pending_event_queue`. +2. Adds event id to `in_flight_event_ids`. +3. Calls `scheduleEventProcessing()` (async). +4. `scheduleEventProcessing()` selects the event semaphore and runs `processEvent()`. +5. `processEvent()`: + - `event.markStarted()` + - `notifyFindListeners(event)` + - creates handler results (`event_results`) + - runs handlers (respecting handler semaphore) + - decrements `event_pending_bus_count` and calls `event.markCompleted(false)` (completes only if all buses and children are done) + +### 2) Event concurrency modes (`event_concurrency`) + +- **`global-serial`**: events are serialized across _all_ buses using `LockManager.global_event_semaphore`. +- **`bus-serial`**: events are serialized per bus; different buses can overlap. +- **`parallel`**: no event semaphore; events can run concurrently on the same bus. +- **`auto`**: resolves to the bus default. + +**Mixed buses:** each bus enforces its own event mode. Forwarding to another bus does not inherit the source bus’s mode. + +### 3) Handler concurrency modes (`event_handler_concurrency`) + +`event_handler_concurrency` controls how handlers run **for a single event**: + +- **`global-serial`**: only one handler at a time across all buses using `LockManager.global_handler_semaphore`. +- **`bus-serial`**: handlers serialize per bus. +- **`parallel`**: handlers run concurrently for the event. +- **`auto`**: resolves to the bus default. + +**Interaction with event concurrency:** +Even if events are parallel, handlers can still be serialized: +`event_concurrency: "parallel"` + `event_handler_concurrency: "bus-serial"` means events start concurrently but handler execution on a bus is serialized. 
+ +### 4) Forwarding across buses (non-awaited) + +When a handler on Bus A calls `bus_b.dispatch(event)` without awaiting: + +- Bus A continues running its handler. +- Bus B queues and processes the event according to **Bus B’s** concurrency settings. +- No coupling unless both buses use the global semaphores. + +### 5) Queue-jump (`await event.done()` inside handlers) + +When `event.done()` is awaited inside a handler, **queue-jump** happens: + +1. `BaseEvent.done()` delegates to `bus.processEventImmediately()`, which detects whether we're inside a handler + (via `getActiveHandlerResult()` / `getParentEventResultAcrossAllBusses()`). If not inside a handler, it falls back to `waitForCompletion()`. +2. `processEventImmediately()` **yields** the parent handler's concurrency semaphore (if held) so child handlers can acquire it. +3. `processEventImmediately()` removes the event from the pending queue (if present). +4. `runImmediatelyAcrossBuses()` processes the event immediately on all buses where it is queued. +5. While immediate processing is active, each affected bus's runloop is paused to prevent unrelated events from running. +6. Once immediate processing completes, `processEventImmediately()` **re-acquires** the parent handler's semaphore + (unless the parent timed out while the child was processing). +7. Paused runloops resume. + +**Important:** queue-jump bypasses event semaphores but **respects** handler semaphores via yield-and-reacquire. +This means queue-jumped handlers run serially on a `bus-serial` bus, not in parallel. + +### 6) Precedence recap + +Highest β†’ lowest: + +1. Event instance fields (`event_concurrency`, `event_handler_concurrency`) +2. Handler options (`event_handler_concurrency`) +3. Bus defaults + +`"auto"` always resolves to the bus default. 
+ +## Gotchas and Design Choices (What surprised us) + +### A) Handler attribution without AsyncLocalStorage + +We need to know **which handler emitted a child** to correctly assign: + +- `event_parent_id` +- `event_emitted_by_handler_id` +- and to attach child events under the correct handler in the tree. + +In TS we do this by injecting a **BusScopedEvent** into handlers, which captures the active handler id and +propagates it via `event_emitted_by_handler_id`. This keeps parentage deterministic even with nested awaits. + +### B) Why runloop pausing exists + +When an event is awaited inside a handler, the event must **jump the queue**. If the runloop continues normally, +it could process unrelated events ("overshoot"), breaking FIFO guarantees. + +The `LockManager` pause mechanism (`requestPause`/`waitUntilRunloopResumed`) pauses the runloop while we run the awaited +event immediately. Once the queue-jump completes, the runloop resumes in FIFO order. This matches the Python behavior. + +### C) BusScopedEvent: why it exists and how it works + +Forwarding exposes a subtle bug: if you pass the **same event object** to another bus, a naive implementation +can mutate `event.bus` mid-handler and break parent-child tracking. + +To prevent that: + +- Handlers always receive a **BusScopedEvent** (Proxy of the original event). +- Its `bus` property is a proxy over the real `EventBus`. +- That proxy intercepts `emit/dispatch` to set `event_parent_id` and attach children to the correct handler. +- The original event object is still the canonical one stored in history. 
+ +### D) Cross-bus immediate processing (forwarding + awaiting) + +When you `await event.done()` inside a handler: + +- the system finds all buses that have this event queued (using `EventBus._all_instances` + `event_path`) +- pauses their runloops +- processes the event immediately on each bus +- then resumes the runloops + +This gives the same "awaited events jump the queue" semantics as Python, but without a global lock. + +### E) Why `event.bus` is required for `done()` + +`done()` is the signal to run an event immediately when called inside a handler. Without a bus, we can't +perform the queue jump, so `done()` throws if no bus is attached. + +## Summary + +The core contract is preserved: + +- FIFO order +- child event tracking +- forwarding +- await-inside-handler queue jump + +But the **implementation details are different** because JS needs browser compatibility and lacks Python's +contextvars + asyncio primitives. The `LockManager` (runloop pause + semaphore coordination), `HandlerLock` +(yield-and-reacquire), and `BusScopedEvent` proxy are the key differences that make the behavior match in practice. + +--- + +## `retry()` Decorator + +`retry()` adds retry logic and optional semaphore-based concurrency limiting to any async function. + +### Why retry is a handler-level concept + +Retry and timeout belong on the **handler**, not on `emit()` or `done()`: + +- **Handlers fail, events don't.** An event has no error state β€” it's a message. Individual handlers + produce errors, timeouts, and exceptions that may need retrying. The handler knows *why* it failed + and whether retrying makes sense. + +- **Replayability.** When you replay an event log, each emit should produce exactly one event. If retry + lives on the handler, the log records one emit β†’ one handler invocation β†’ one result. The retry + attempts are invisible implementation details. 
If retry lives on `emit()`, the log contains multiple
+  separate events for the same logical operation, making replays non-deterministic.
+
+- **Separation of concerns.** Event-level concurrency (`event_concurrency`) and handler-level concurrency
+  (`event_handler_concurrency`) are bus-level scheduling concerns. Retry/timeout/semaphore limiting are
+  handler-level resilience concerns. They compose orthogonally — don't mix them.
+
+### Recommended pattern: `@retry()` on class methods
+
+```ts
+import { retry, EventBus, BaseEvent } from 'bubus'
+
+class ScreenshotService {
+  constructor(private bus: InstanceType<typeof EventBus>) {
+    bus.on(ScreenshotRequestEvent, this.on_ScreenshotRequest.bind(this))
+  }
+
+  @retry({
+    max_attempts: 4,
+    retry_on_errors: [/timeout/i],
+    timeout: 5,
+    semaphore_scope: 'global',
+    semaphore_name: 'Screenshots',
+    semaphore_limit: 2,
+  })
+  async on_ScreenshotRequest(event: InstanceType<typeof ScreenshotRequestEvent>): Promise<string> {
+    // At most 2 concurrent screenshot operations globally.
+    // Each attempt times out after 5s. Up to 4 total attempts.
+    // Only retries on timeout-related errors.
+    return await takeScreenshot(event.data.url)
+  }
+}
+
+// Emit side stays clean — no retry/timeout concerns
+const event = bus.emit(ScreenshotRequestEvent({ url: 'https://example.com' }))
+await event.done()
+```
+
+This is the primary supported pattern. The `@retry()` decorator handles:
+- **Retry logic**: max attempts, backoff, error filtering
+- **Per-attempt timeout**: each attempt gets its own deadline
+- **Concurrency limiting**: semaphore-based, with global/class/instance scoping
+
+The emit site just dispatches events and awaits completion — it doesn't know or care about retries.
+ +### Also works: inline HOF for simple handlers + +```ts +// For one-off handlers that don't need a class +bus.on(MyEvent, retry({ max_attempts: 3, timeout: 10 })(async (event) => { + await riskyOperation(event.data) +})) +``` + +### Options + +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `max_attempts` | `number` | `1` | Total attempts including the initial call. `1` = no retry, `3` = up to 2 retries. | +| `retry_after` | `number` | `0` | Seconds to wait between retries. | +| `retry_backoff_factor` | `number` | `1.0` | Multiplier applied to `retry_after` after each attempt. `2.0` = exponential backoff. | +| `retry_on_errors` | `(ErrorClass \| string \| RegExp)[]` | `undefined` | Only retry when the error matches a matcher. Accepts class constructors (`instanceof`), strings (matched against `error.name`), or RegExp (tested against `String(error)`). Can be mixed: `[TypeError, 'NetworkError', /timeout/i]`. `undefined` = retry on any error. | +| `timeout` | `number \| null` | `undefined` | Per-attempt timeout in seconds. Throws `RetryTimeoutError` if exceeded. | +| `semaphore_limit` | `number \| null` | `undefined` | Max concurrent executions sharing this semaphore. | +| `semaphore_name` | `string \| null` | fn name | Semaphore identifier. Functions with the same name share the same slot pool. | +| `semaphore_lax` | `boolean` | `true` | If `true`, proceed without concurrency limit when semaphore acquisition times out. | +| `semaphore_scope` | `'global' \| 'class' \| 'instance'` | `'global'` | `'global'`: one semaphore for all calls. `'class'`: one per class (keyed by `constructor.name`). `'instance'`: one per object instance (keyed by WeakMap identity). `'class'`/`'instance'` require `this` to be an object; they fall back to `'global'` for standalone calls. | +| `semaphore_timeout` | `number \| null` | `undefined` | Max seconds to wait for semaphore. Default: `timeout * max(1, limit - 1)`. 
| + +### Error types + +- **`RetryTimeoutError`** β€” thrown when a single attempt exceeds `timeout`. Has `.timeout_seconds` and `.attempt` fields. Retryable by default (treated like any other error in the retry loop). +- **`SemaphoreTimeoutError`** β€” thrown (when `semaphore_lax=false`) if the semaphore cannot be acquired within the timeout. Has `.semaphore_name`, `.semaphore_limit`, `.timeout_seconds` fields. + +### Semaphore concurrency control + +The semaphore is acquired **once** before the first attempt and held across all retries. This prevents other +callers from stealing the slot between retry attempts. + +```ts +class ApiService { + @retry({ + max_attempts: 2, + semaphore_limit: 3, + semaphore_name: 'api_calls', + }) + async callExternalApi(): Promise { + // At most 3 concurrent calls across all instances of ApiService + return await fetch('https://api.example.com') + } +} +``` + +Functions that share a `semaphore_name` share the same slot pool β€” this is how you limit concurrency across +different functions that access the same resource. + +### Re-entrancy and deadlock prevention + +The decorator uses `AsyncLocalStorage` (on Node.js) to track which semaphores are held in the current async +call stack. When a nested call encounters a semaphore it already holds, it **skips acquisition** and runs +directly within the parent's slot. This prevents deadlocks in recursive or nested scenarios: + +```ts +const inner = retry({ semaphore_limit: 1, semaphore_name: 'shared' })(async () => 'ok') + +const outer = retry({ semaphore_limit: 1, semaphore_name: 'shared' })(async () => { + // Without re-entrancy tracking, this would deadlock: + // outer holds the semaphore, inner tries to acquire the same one. + // With re-entrancy, inner detects 'shared' is already held and skips acquisition. 
+ return await inner() +}) + +await outer() // works, no deadlock +``` + +This also works for recursive calls (a function calling itself) and deeply nested chains (A β†’ B β†’ C all sharing +a semaphore). + +In browsers (no `AsyncLocalStorage`), re-entrancy tracking is unavailable and the decorator gracefully degrades +to a no-op (no deadlock detection). Avoid recursive/nested calls through the same semaphore in browser +environments, or use different `semaphore_name` values. + +### Interaction with bus concurrency options + +`retry()` and the bus's concurrency modes are **orthogonal** and compose together: + +- **`event_concurrency`** controls how many events the bus processes at once (via the runloop + event semaphore). +- **`event_handler_concurrency`** controls how many handlers run concurrently for a single event (via the handler semaphore). +- **`retry()` semaphores** control how many concurrent invocations of a specific handler are allowed (via a global semaphore registry). + +These are separate concerns: +- Bus concurrency = scheduling (how the bus orders event/handler execution) +- Retry semaphores = resilience (how individual handlers manage concurrency and failure recovery) + +When you use `@retry()` on a bus handler, both layers apply. The execution order is: +1. Bus acquires the **handler concurrency semaphore** (e.g. `bus-serial`) +2. `retry()` acquires its own **retry semaphore** (if `semaphore_limit` is set) +3. The handler function runs (with retries if it throws) +4. `retry()` releases its semaphore +5. Bus releases the handler concurrency semaphore + +The bus's `handler_timeout` and `retry()`'s `timeout` are independent: +- `handler_timeout` (set via `bus.on()` options or bus defaults) applies to the **entire** wrapped handler call, including all retry attempts. +- `retry({ timeout })` applies to **each individual attempt**. + +If you need per-attempt timeouts, use `retry({ timeout })`. 
If you need an overall deadline for the handler +(including all retries), rely on the bus's `handler_timeout`. + +### Discouraged: wrapping `emit()` β†’ `done()` in `retry()` + +This pattern is technically supported but **not recommended**: + +```ts +// DON'T DO THIS β€” retry belongs on the handler, not the emit site. +const event = await retry({ max_attempts: 4 })(async () => { + const ev = bus.emit(ScreenshotRequestEvent({ full_page: false })) + await ev.done() + if (ev.event_errors.length) throw ev.event_errors[0] + return ev +})() +``` + +Why this is worse: + +1. **Architecture**: the emit site doesn't know which handler failed or why. The handler is the right + place for retry logic because it has the context to decide whether retrying makes sense. + +2. **Replayability**: each retry dispatches a **new event**, producing multiple events in the log for + one logical operation. On replay, if the handler succeeds on the first attempt, you get a different + event topology than the original run. With handler-level retry, the log always shows one emit β†’ one + handler result, regardless of how many retry attempts were needed internally. + +3. **Determinism**: the same emit may fan out to multiple handlers. Retrying the whole dispatch because + one handler failed also re-runs handlers that succeeded β€” wasteful and potentially side-effectful. + +Use the `@retry()` decorator on the handler method instead. 
+ +### Differences from the Python `@retry` decorator + +| Aspect | Python | TypeScript | +|--------|--------|------------| +| **Naming** | `retries=3` (retry count after first attempt) | `max_attempts=1` (total attempts including first) | +| **Naming** | `wait=3` (seconds between retries) | `retry_after=0` (seconds between retries) | +| **Naming** | `retry_on` | `retry_on_errors` | +| **Default retries** | 3 retries (4 total attempts) | 1 attempt (no retries) | +| **Default delay** | 3 seconds | 0 seconds | +| **Default timeout** | 5 seconds per attempt | No timeout | +| **Semaphore scopes** | `'global'`, `'class'`, `'self'`, `'multiprocess'` | `'global'`, `'class'`, `'instance'` (no multiprocess β€” single-process JS runtime) | +| **System overload** | Tracks active operations, checks CPU/memory via `psutil` | Not implemented | +| **Re-entrancy** | Not implemented (relies on Python's GIL + asyncio single-thread) | `AsyncLocalStorage`-based tracking to prevent deadlocks | +| **Syntax** | `@retry(...)` decorator on `async def` | `@retry({...})` on class methods (TC39 Stage 3), or `retry({...})(fn)` HOF | +| **Sync functions** | Not supported (async-only) | Supported (wrapper always returns a Promise) | + +The TS version intentionally starts with conservative defaults (1 attempt, no delay, no timeout) so that +`retry()` with no options is a no-op wrapper. The Python version defaults to 3 retries with 3s delay and 5s +timeout, which is more aggressive. 
diff --git a/bubus-ts/eslint.config.js b/bubus-ts/eslint.config.js
new file mode 100644
index 0000000..4783e2a
--- /dev/null
+++ b/bubus-ts/eslint.config.js
@@ -0,0 +1,22 @@
+import ts_parser from '@typescript-eslint/parser'
+import ts_eslint_plugin from '@typescript-eslint/eslint-plugin'
+
+export default [
+  {
+    files: ['**/*.ts'],
+    languageOptions: {
+      parser: ts_parser,
+      parserOptions: {
+        sourceType: 'module',
+        ecmaVersion: 'latest',
+      },
+    },
+    plugins: {
+      '@typescript-eslint': ts_eslint_plugin,
+    },
+    rules: {
+      'no-unused-vars': 'off',
+      '@typescript-eslint/no-unused-vars': ['error', { argsIgnorePattern: '^_' }],
+    },
+  },
+]
diff --git a/bubus-ts/examples/log_tree_demo.ts b/bubus-ts/examples/log_tree_demo.ts
new file mode 100644
index 0000000..a4aaef0
--- /dev/null
+++ b/bubus-ts/examples/log_tree_demo.ts
@@ -0,0 +1,98 @@
+import { z } from 'zod'
+
+import { BaseEvent, EventBus } from '../src/index.js'
+
+const RootEvent = BaseEvent.extend('RootEvent', {
+  url: z.string(),
+  event_result_schema: z.string(),
+  event_result_type: 'string',
+})
+
+const ChildEvent = BaseEvent.extend('ChildEvent', {
+  tab_id: z.string(),
+  event_result_schema: z.string(),
+  event_result_type: 'string',
+})
+
+const GrandchildEvent = BaseEvent.extend('GrandchildEvent', {
+  status: z.string(),
+  event_result_schema: z.string(),
+  event_result_type: 'string',
+})
+
+const delay = (ms: number): Promise<void> =>
+  new Promise((resolve) => {
+    setTimeout(resolve, ms)
+  })
+
+async function main(): Promise<void> {
+  const bus_a = new EventBus('BusA')
+  const bus_b = new EventBus('BusB')
+
+  async function forward_to_bus_b(event: InstanceType<typeof BaseEvent>): Promise<string> {
+    await delay(20)
+    bus_b.dispatch(event)
+    return 'forwarded_to_bus_b'
+  }
+
+  bus_a.on('*', forward_to_bus_b)
+
+  async function root_fast_handler(event: InstanceType<typeof RootEvent>): Promise<string> {
+    await delay(10)
+    const child = event.bus?.emit(ChildEvent({ tab_id: 'tab-123', event_timeout: 0.1 }))
+    if (child) {
+      await child.done()
+    }
+    return 
'root_fast_handler_ok'
+  }
+
+  async function root_slow_handler(event: InstanceType<typeof RootEvent>): Promise<string> {
+    event.bus?.emit(ChildEvent({ tab_id: 'tab-timeout', event_timeout: 0.1 }))
+    await delay(400)
+    return 'root_slow_handler_timeout'
+  }
+
+  bus_a.on(RootEvent, root_fast_handler)
+  bus_a.on(RootEvent, root_slow_handler)
+
+  async function child_slow_handler(_event: InstanceType<typeof ChildEvent>): Promise<string> {
+    await delay(150)
+    return 'child_slow_handler_done'
+  }
+
+  async function child_fast_handler(event: InstanceType<typeof ChildEvent>): Promise<string> {
+    await delay(10)
+    const grandchild = event.bus?.emit(GrandchildEvent({ status: 'ok', event_timeout: 0.05 }))
+    if (grandchild) {
+      await grandchild.done()
+    }
+    return 'child_handler_ok'
+  }
+
+  async function grandchild_fast_handler(): Promise<string> {
+    await delay(5)
+    return 'grandchild_fast_handler_ok'
+  }
+
+  async function grandchild_slow_handler(): Promise<string> {
+    await delay(60)
+    return 'grandchild_slow_handler_timeout'
+  }
+
+  bus_b.on(ChildEvent, child_slow_handler)
+  bus_b.on(ChildEvent, child_fast_handler)
+  bus_b.on(GrandchildEvent, grandchild_fast_handler)
+  bus_b.on(GrandchildEvent, grandchild_slow_handler)
+
+  const root_event = bus_a.dispatch(RootEvent({ url: 'https://example.com', event_timeout: 0.25 }))
+
+  await root_event.done()
+
+  console.log('\n=== BusA logTree ===')
+  console.log(bus_a.logTree())
+
+  console.log('\n=== BusB logTree ===')
+  console.log(bus_b.logTree())
+}
+
+await main()
diff --git a/bubus-ts/package.json b/bubus-ts/package.json
new file mode 100644
index 0000000..67d5406
--- /dev/null
+++ b/bubus-ts/package.json
@@ -0,0 +1,63 @@
+{
+  "name": "bubus",
+  "version": "1.7.3",
+  "description": "Event bus library for browsers and ESM Node.js",
+  "type": "module",
+  "main": "./dist/esm/index.js",
+  "module": "./dist/esm/index.js",
+  "types": "./dist/types/index.d.ts",
+  "exports": {
+    ".": {
+      "types": "./dist/types/index.d.ts",
+      "import": "./dist/esm/index.js",
+      "default": "./dist/esm/index.js"
+    }
+  },
+  "files": [
"dist/esm", + "dist/types" + ], + "scripts": { + "build": "pnpm run build:esm && pnpm run build:types", + "build:esm": "esbuild src/index.ts --bundle --format=esm --platform=neutral --target=es2022 --sourcemap --outdir=dist/esm", + "build:types": "tsc -p tsconfig.json --emitDeclarationOnly", + "typecheck": "tsc -p tsconfig.json --noEmit", + "lint": "pnpm run format:check && eslint . && pnpm run typecheck", + "format": "prettier --write .", + "format:check": "prettier --check .", + "test": "NODE_OPTIONS='--expose-gc' node --expose-gc --test --import tsx tests/**/*.test.ts", + "prepack": "pnpm run build", + "release:dry-run": "pnpm publish --access public --dry-run --no-git-checks", + "release:check": "pnpm run typecheck && pnpm test && pnpm run build" + }, + "keywords": [], + "author": "", + "license": "MIT", + "packageManager": "pnpm@10.23.0", + "dependencies": { + "uuid": "^11.1.0", + "zod": "^4.3.6" + }, + "devDependencies": { + "@typescript-eslint/eslint-plugin": "^8.46.0", + "@typescript-eslint/parser": "^8.46.0", + "esbuild": "^0.27.2", + "eslint": "^9.39.2", + "prettier": "^3.8.1", + "tsx": "^4.20.6", + "typescript": "^5.9.3" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/pirate/bbus.git", + "directory": "bubus-ts" + }, + "bugs": { + "url": "https://github.com/pirate/bbus/issues" + }, + "homepage": "https://github.com/pirate/bbus/tree/main/bubus-ts", + "publishConfig": { + "access": "public", + "registry": "https://registry.npmjs.org/" + } +} diff --git a/bubus-ts/pnpm-lock.yaml b/bubus-ts/pnpm-lock.yaml new file mode 100644 index 0000000..331a564 --- /dev/null +++ b/bubus-ts/pnpm-lock.yaml @@ -0,0 +1,1234 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + .: + dependencies: + uuid: + specifier: ^11.1.0 + version: 11.1.0 + zod: + specifier: ^4.3.6 + version: 4.3.6 + devDependencies: + '@typescript-eslint/eslint-plugin': + specifier: ^8.46.0 + version: 
8.54.0(@typescript-eslint/parser@8.54.0(eslint@9.39.2)(typescript@5.9.3))(eslint@9.39.2)(typescript@5.9.3) + '@typescript-eslint/parser': + specifier: ^8.46.0 + version: 8.54.0(eslint@9.39.2)(typescript@5.9.3) + esbuild: + specifier: ^0.27.2 + version: 0.27.2 + eslint: + specifier: ^9.39.2 + version: 9.39.2 + prettier: + specifier: ^3.8.1 + version: 3.8.1 + tsx: + specifier: ^4.20.6 + version: 4.21.0 + typescript: + specifier: ^5.9.3 + version: 5.9.3 + +packages: + '@esbuild/aix-ppc64@0.27.2': + resolution: { integrity: sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw== } + engines: { node: '>=18' } + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.27.2': + resolution: { integrity: sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA== } + engines: { node: '>=18' } + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.27.2': + resolution: { integrity: sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA== } + engines: { node: '>=18' } + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.27.2': + resolution: { integrity: sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A== } + engines: { node: '>=18' } + cpu: [x64] + os: [android] + + '@esbuild/darwin-arm64@0.27.2': + resolution: { integrity: sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg== } + engines: { node: '>=18' } + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.27.2': + resolution: { integrity: sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA== } + engines: { node: '>=18' } + cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.27.2': + resolution: { integrity: sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g== } + engines: { node: '>=18' } + cpu: [arm64] + os: [freebsd] + + 
'@esbuild/freebsd-x64@0.27.2': + resolution: { integrity: sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA== } + engines: { node: '>=18' } + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.27.2': + resolution: { integrity: sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw== } + engines: { node: '>=18' } + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.27.2': + resolution: { integrity: sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw== } + engines: { node: '>=18' } + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.27.2': + resolution: { integrity: sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w== } + engines: { node: '>=18' } + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.27.2': + resolution: { integrity: sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg== } + engines: { node: '>=18' } + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.27.2': + resolution: { integrity: sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw== } + engines: { node: '>=18' } + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.27.2': + resolution: { integrity: sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ== } + engines: { node: '>=18' } + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.27.2': + resolution: { integrity: sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA== } + engines: { node: '>=18' } + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.27.2': + resolution: { integrity: sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w== } + engines: { node: '>=18' } + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.27.2': + 
resolution: { integrity: sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA== } + engines: { node: '>=18' } + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-arm64@0.27.2': + resolution: { integrity: sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw== } + engines: { node: '>=18' } + cpu: [arm64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.27.2': + resolution: { integrity: sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA== } + engines: { node: '>=18' } + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-arm64@0.27.2': + resolution: { integrity: sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA== } + engines: { node: '>=18' } + cpu: [arm64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.27.2': + resolution: { integrity: sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg== } + engines: { node: '>=18' } + cpu: [x64] + os: [openbsd] + + '@esbuild/openharmony-arm64@0.27.2': + resolution: { integrity: sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag== } + engines: { node: '>=18' } + cpu: [arm64] + os: [openharmony] + + '@esbuild/sunos-x64@0.27.2': + resolution: { integrity: sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg== } + engines: { node: '>=18' } + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.27.2': + resolution: { integrity: sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg== } + engines: { node: '>=18' } + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.27.2': + resolution: { integrity: sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ== } + engines: { node: '>=18' } + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.27.2': + resolution: { integrity: 
sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ== } + engines: { node: '>=18' } + cpu: [x64] + os: [win32] + + '@eslint-community/eslint-utils@4.9.1': + resolution: { integrity: sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ== } + engines: { node: ^12.22.0 || ^14.17.0 || >=16.0.0 } + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + + '@eslint-community/regexpp@4.12.2': + resolution: { integrity: sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew== } + engines: { node: ^12.0.0 || ^14.0.0 || >=16.0.0 } + + '@eslint/config-array@0.21.1': + resolution: { integrity: sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@eslint/config-helpers@0.4.2': + resolution: { integrity: sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@eslint/core@0.17.0': + resolution: { integrity: sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@eslint/eslintrc@3.3.3': + resolution: { integrity: sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@eslint/js@9.39.2': + resolution: { integrity: sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@eslint/object-schema@2.1.7': + resolution: { integrity: sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@eslint/plugin-kit@0.4.1': + resolution: { integrity: 
sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@humanfs/core@0.19.1': + resolution: { integrity: sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA== } + engines: { node: '>=18.18.0' } + + '@humanfs/node@0.16.7': + resolution: { integrity: sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ== } + engines: { node: '>=18.18.0' } + + '@humanwhocodes/module-importer@1.0.1': + resolution: { integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA== } + engines: { node: '>=12.22' } + + '@humanwhocodes/retry@0.4.3': + resolution: { integrity: sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ== } + engines: { node: '>=18.18' } + + '@types/estree@1.0.8': + resolution: { integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w== } + + '@types/json-schema@7.0.15': + resolution: { integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA== } + + '@typescript-eslint/eslint-plugin@8.54.0': + resolution: { integrity: sha512-hAAP5io/7csFStuOmR782YmTthKBJ9ND3WVL60hcOjvtGFb+HJxH4O5huAcmcZ9v9G8P+JETiZ/G1B8MALnWZQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + peerDependencies: + '@typescript-eslint/parser': ^8.54.0 + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/parser@8.54.0': + resolution: { integrity: sha512-BtE0k6cjwjLZoZixN0t5AKP0kSzlGu7FctRXYuPAm//aaiZhmfq1JwdYpYr1brzEspYyFeF+8XF5j2VK6oalrA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/project-service@8.54.0': + resolution: { integrity: 
sha512-YPf+rvJ1s7MyiWM4uTRhE4DvBXrEV+d8oC3P9Y2eT7S+HBS0clybdMIPnhiATi9vZOYDc7OQ1L/i6ga6NFYK/g== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/scope-manager@8.54.0': + resolution: { integrity: sha512-27rYVQku26j/PbHYcVfRPonmOlVI6gihHtXFbTdB5sb6qA0wdAQAbyXFVarQ5t4HRojIz64IV90YtsjQSSGlQg== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@typescript-eslint/tsconfig-utils@8.54.0': + resolution: { integrity: sha512-dRgOyT2hPk/JwxNMZDsIXDgyl9axdJI3ogZ2XWhBPsnZUv+hPesa5iuhdYt2gzwA9t8RE5ytOJ6xB0moV0Ujvw== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/type-utils@8.54.0': + resolution: { integrity: sha512-hiLguxJWHjjwL6xMBwD903ciAwd7DmK30Y9Axs/etOkftC3ZNN9K44IuRD/EB08amu+Zw6W37x9RecLkOo3pMA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/types@8.54.0': + resolution: { integrity: sha512-PDUI9R1BVjqu7AUDsRBbKMtwmjWcn4J3le+5LpcFgWULN3LvHC5rkc9gCVxbrsrGmO1jfPybN5s6h4Jy+OnkAA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@typescript-eslint/typescript-estree@8.54.0': + resolution: { integrity: sha512-BUwcskRaPvTk6fzVWgDPdUndLjB87KYDrN5EYGetnktoeAvPtO4ONHlAZDnj5VFnUANg0Sjm7j4usBlnoVMHwA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/utils@8.54.0': + resolution: { integrity: sha512-9Cnda8GS57AQakvRyG0PTejJNlA2xhvyNtEVIMlDWOOeEyBkYWhGPnfrIAnqxLMTSTo6q8g12XVjjev5l1NvMA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/visitor-keys@8.54.0': + resolution: { integrity: sha512-VFlhGSl4opC0bprJiItPQ1RfUhGDIBokcPwaFH4yiBCaNPeld/9VeXbiPO1cLyorQi1G1vL+ecBk1x8o1axORA== } + engines: { node: ^18.18.0 || 
^20.9.0 || >=21.1.0 } + + acorn-jsx@5.3.2: + resolution: { integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ== } + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + acorn@8.15.0: + resolution: { integrity: sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg== } + engines: { node: '>=0.4.0' } + hasBin: true + + ajv@6.12.6: + resolution: { integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== } + + ansi-styles@4.3.0: + resolution: { integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== } + engines: { node: '>=8' } + + argparse@2.0.1: + resolution: { integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== } + + balanced-match@1.0.2: + resolution: { integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== } + + brace-expansion@1.1.12: + resolution: { integrity: sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg== } + + brace-expansion@2.0.2: + resolution: { integrity: sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ== } + + callsites@3.1.0: + resolution: { integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== } + engines: { node: '>=6' } + + chalk@4.1.2: + resolution: { integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== } + engines: { node: '>=10' } + + color-convert@2.0.1: + resolution: { integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== } + engines: { node: '>=7.0.0' } + + color-name@1.1.4: + resolution: { integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== 
} + + concat-map@0.0.1: + resolution: { integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== } + + cross-spawn@7.0.6: + resolution: { integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== } + engines: { node: '>= 8' } + + debug@4.4.3: + resolution: { integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA== } + engines: { node: '>=6.0' } + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + deep-is@0.1.4: + resolution: { integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ== } + + esbuild@0.27.2: + resolution: { integrity: sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw== } + engines: { node: '>=18' } + hasBin: true + + escape-string-regexp@4.0.0: + resolution: { integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== } + engines: { node: '>=10' } + + eslint-scope@8.4.0: + resolution: { integrity: sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + eslint-visitor-keys@3.4.3: + resolution: { integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag== } + engines: { node: ^12.22.0 || ^14.17.0 || >=16.0.0 } + + eslint-visitor-keys@4.2.1: + resolution: { integrity: sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + eslint@9.39.2: + resolution: { integrity: sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + hasBin: true + peerDependencies: + jiti: '*' + 
peerDependenciesMeta: + jiti: + optional: true + + espree@10.4.0: + resolution: { integrity: sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + esquery@1.7.0: + resolution: { integrity: sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g== } + engines: { node: '>=0.10' } + + esrecurse@4.3.0: + resolution: { integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag== } + engines: { node: '>=4.0' } + + estraverse@5.3.0: + resolution: { integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA== } + engines: { node: '>=4.0' } + + esutils@2.0.3: + resolution: { integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== } + engines: { node: '>=0.10.0' } + + fast-deep-equal@3.1.3: + resolution: { integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== } + + fast-json-stable-stringify@2.1.0: + resolution: { integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== } + + fast-levenshtein@2.0.6: + resolution: { integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== } + + fdir@6.5.0: + resolution: { integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg== } + engines: { node: '>=12.0.0' } + peerDependencies: + picomatch: ^3 || ^4 + peerDependenciesMeta: + picomatch: + optional: true + + file-entry-cache@8.0.0: + resolution: { integrity: sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ== } + engines: { node: '>=16.0.0' } + + find-up@5.0.0: + resolution: { integrity: 
sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng== } + engines: { node: '>=10' } + + flat-cache@4.0.1: + resolution: { integrity: sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw== } + engines: { node: '>=16' } + + flatted@3.3.3: + resolution: { integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg== } + + fsevents@2.3.3: + resolution: { integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw== } + engines: { node: ^8.16.0 || ^10.6.0 || >=11.0.0 } + os: [darwin] + + get-tsconfig@4.13.1: + resolution: { integrity: sha512-EoY1N2xCn44xU6750Sx7OjOIT59FkmstNc3X6y5xpz7D5cBtZRe/3pSlTkDJgqsOk3WwZPkWfonhhUJfttQo3w== } + + glob-parent@6.0.2: + resolution: { integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A== } + engines: { node: '>=10.13.0' } + + globals@14.0.0: + resolution: { integrity: sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ== } + engines: { node: '>=18' } + + has-flag@4.0.0: + resolution: { integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== } + engines: { node: '>=8' } + + ignore@5.3.2: + resolution: { integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g== } + engines: { node: '>= 4' } + + ignore@7.0.5: + resolution: { integrity: sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg== } + engines: { node: '>= 4' } + + import-fresh@3.3.1: + resolution: { integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ== } + engines: { node: '>=6' } + + imurmurhash@0.1.4: + resolution: { integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA== } 
+ engines: { node: '>=0.8.19' } + + is-extglob@2.1.1: + resolution: { integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ== } + engines: { node: '>=0.10.0' } + + is-glob@4.0.3: + resolution: { integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg== } + engines: { node: '>=0.10.0' } + + isexe@2.0.0: + resolution: { integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw== } + + js-yaml@4.1.1: + resolution: { integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== } + hasBin: true + + json-buffer@3.0.1: + resolution: { integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ== } + + json-schema-traverse@0.4.1: + resolution: { integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== } + + json-stable-stringify-without-jsonify@1.0.1: + resolution: { integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw== } + + keyv@4.5.4: + resolution: { integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw== } + + levn@0.4.1: + resolution: { integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ== } + engines: { node: '>= 0.8.0' } + + locate-path@6.0.0: + resolution: { integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw== } + engines: { node: '>=10' } + + lodash.merge@4.6.2: + resolution: { integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== } + + minimatch@3.1.2: + resolution: { integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== } + + minimatch@9.0.5: + resolution: { integrity: 
sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow== } + engines: { node: '>=16 || 14 >=14.17' } + + ms@2.1.3: + resolution: { integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== } + + natural-compare@1.4.0: + resolution: { integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw== } + + optionator@0.9.4: + resolution: { integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g== } + engines: { node: '>= 0.8.0' } + + p-limit@3.1.0: + resolution: { integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== } + engines: { node: '>=10' } + + p-locate@5.0.0: + resolution: { integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw== } + engines: { node: '>=10' } + + parent-module@1.0.1: + resolution: { integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== } + engines: { node: '>=6' } + + path-exists@4.0.0: + resolution: { integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== } + engines: { node: '>=8' } + + path-key@3.1.1: + resolution: { integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== } + engines: { node: '>=8' } + + picomatch@4.0.3: + resolution: { integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q== } + engines: { node: '>=12' } + + prelude-ls@1.2.1: + resolution: { integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g== } + engines: { node: '>= 0.8.0' } + + prettier@3.8.1: + resolution: { integrity: sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg== } + engines: { node: 
'>=14' } + hasBin: true + + punycode@2.3.1: + resolution: { integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== } + engines: { node: '>=6' } + + resolve-from@4.0.0: + resolution: { integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== } + engines: { node: '>=4' } + + resolve-pkg-maps@1.0.0: + resolution: { integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw== } + + semver@7.7.3: + resolution: { integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q== } + engines: { node: '>=10' } + hasBin: true + + shebang-command@2.0.0: + resolution: { integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== } + engines: { node: '>=8' } + + shebang-regex@3.0.0: + resolution: { integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== } + engines: { node: '>=8' } + + strip-json-comments@3.1.1: + resolution: { integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== } + engines: { node: '>=8' } + + supports-color@7.2.0: + resolution: { integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== } + engines: { node: '>=8' } + + tinyglobby@0.2.15: + resolution: { integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ== } + engines: { node: '>=12.0.0' } + + ts-api-utils@2.4.0: + resolution: { integrity: sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA== } + engines: { node: '>=18.12' } + peerDependencies: + typescript: '>=4.8.4' + + tsx@4.21.0: + resolution: { integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw== } + engines: { node: 
'>=18.0.0' } + hasBin: true + + type-check@0.4.0: + resolution: { integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew== } + engines: { node: '>= 0.8.0' } + + typescript@5.9.3: + resolution: { integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw== } + engines: { node: '>=14.17' } + hasBin: true + + uri-js@4.4.1: + resolution: { integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== } + + uuid@11.1.0: + resolution: { integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A== } + hasBin: true + + which@2.0.2: + resolution: { integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== } + engines: { node: '>= 8' } + hasBin: true + + word-wrap@1.2.5: + resolution: { integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA== } + engines: { node: '>=0.10.0' } + + yocto-queue@0.1.0: + resolution: { integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== } + engines: { node: '>=10' } + + zod@4.3.6: + resolution: { integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg== } + +snapshots: + '@esbuild/aix-ppc64@0.27.2': + optional: true + + '@esbuild/android-arm64@0.27.2': + optional: true + + '@esbuild/android-arm@0.27.2': + optional: true + + '@esbuild/android-x64@0.27.2': + optional: true + + '@esbuild/darwin-arm64@0.27.2': + optional: true + + '@esbuild/darwin-x64@0.27.2': + optional: true + + '@esbuild/freebsd-arm64@0.27.2': + optional: true + + '@esbuild/freebsd-x64@0.27.2': + optional: true + + '@esbuild/linux-arm64@0.27.2': + optional: true + + '@esbuild/linux-arm@0.27.2': + optional: true + + '@esbuild/linux-ia32@0.27.2': + optional: true + + '@esbuild/linux-loong64@0.27.2': 
+ optional: true + + '@esbuild/linux-mips64el@0.27.2': + optional: true + + '@esbuild/linux-ppc64@0.27.2': + optional: true + + '@esbuild/linux-riscv64@0.27.2': + optional: true + + '@esbuild/linux-s390x@0.27.2': + optional: true + + '@esbuild/linux-x64@0.27.2': + optional: true + + '@esbuild/netbsd-arm64@0.27.2': + optional: true + + '@esbuild/netbsd-x64@0.27.2': + optional: true + + '@esbuild/openbsd-arm64@0.27.2': + optional: true + + '@esbuild/openbsd-x64@0.27.2': + optional: true + + '@esbuild/openharmony-arm64@0.27.2': + optional: true + + '@esbuild/sunos-x64@0.27.2': + optional: true + + '@esbuild/win32-arm64@0.27.2': + optional: true + + '@esbuild/win32-ia32@0.27.2': + optional: true + + '@esbuild/win32-x64@0.27.2': + optional: true + + '@eslint-community/eslint-utils@4.9.1(eslint@9.39.2)': + dependencies: + eslint: 9.39.2 + eslint-visitor-keys: 3.4.3 + + '@eslint-community/regexpp@4.12.2': {} + + '@eslint/config-array@0.21.1': + dependencies: + '@eslint/object-schema': 2.1.7 + debug: 4.4.3 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + + '@eslint/config-helpers@0.4.2': + dependencies: + '@eslint/core': 0.17.0 + + '@eslint/core@0.17.0': + dependencies: + '@types/json-schema': 7.0.15 + + '@eslint/eslintrc@3.3.3': + dependencies: + ajv: 6.12.6 + debug: 4.4.3 + espree: 10.4.0 + globals: 14.0.0 + ignore: 5.3.2 + import-fresh: 3.3.1 + js-yaml: 4.1.1 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + + '@eslint/js@9.39.2': {} + + '@eslint/object-schema@2.1.7': {} + + '@eslint/plugin-kit@0.4.1': + dependencies: + '@eslint/core': 0.17.0 + levn: 0.4.1 + + '@humanfs/core@0.19.1': {} + + '@humanfs/node@0.16.7': + dependencies: + '@humanfs/core': 0.19.1 + '@humanwhocodes/retry': 0.4.3 + + '@humanwhocodes/module-importer@1.0.1': {} + + '@humanwhocodes/retry@0.4.3': {} + + '@types/estree@1.0.8': {} + + '@types/json-schema@7.0.15': {} + + 
'@typescript-eslint/eslint-plugin@8.54.0(@typescript-eslint/parser@8.54.0(eslint@9.39.2)(typescript@5.9.3))(eslint@9.39.2)(typescript@5.9.3)': + dependencies: + '@eslint-community/regexpp': 4.12.2 + '@typescript-eslint/parser': 8.54.0(eslint@9.39.2)(typescript@5.9.3) + '@typescript-eslint/scope-manager': 8.54.0 + '@typescript-eslint/type-utils': 8.54.0(eslint@9.39.2)(typescript@5.9.3) + '@typescript-eslint/utils': 8.54.0(eslint@9.39.2)(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.54.0 + eslint: 9.39.2 + ignore: 7.0.5 + natural-compare: 1.4.0 + ts-api-utils: 2.4.0(typescript@5.9.3) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/parser@8.54.0(eslint@9.39.2)(typescript@5.9.3)': + dependencies: + '@typescript-eslint/scope-manager': 8.54.0 + '@typescript-eslint/types': 8.54.0 + '@typescript-eslint/typescript-estree': 8.54.0(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.54.0 + debug: 4.4.3 + eslint: 9.39.2 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/project-service@8.54.0(typescript@5.9.3)': + dependencies: + '@typescript-eslint/tsconfig-utils': 8.54.0(typescript@5.9.3) + '@typescript-eslint/types': 8.54.0 + debug: 4.4.3 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/scope-manager@8.54.0': + dependencies: + '@typescript-eslint/types': 8.54.0 + '@typescript-eslint/visitor-keys': 8.54.0 + + '@typescript-eslint/tsconfig-utils@8.54.0(typescript@5.9.3)': + dependencies: + typescript: 5.9.3 + + '@typescript-eslint/type-utils@8.54.0(eslint@9.39.2)(typescript@5.9.3)': + dependencies: + '@typescript-eslint/types': 8.54.0 + '@typescript-eslint/typescript-estree': 8.54.0(typescript@5.9.3) + '@typescript-eslint/utils': 8.54.0(eslint@9.39.2)(typescript@5.9.3) + debug: 4.4.3 + eslint: 9.39.2 + ts-api-utils: 2.4.0(typescript@5.9.3) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + 
'@typescript-eslint/types@8.54.0': {} + + '@typescript-eslint/typescript-estree@8.54.0(typescript@5.9.3)': + dependencies: + '@typescript-eslint/project-service': 8.54.0(typescript@5.9.3) + '@typescript-eslint/tsconfig-utils': 8.54.0(typescript@5.9.3) + '@typescript-eslint/types': 8.54.0 + '@typescript-eslint/visitor-keys': 8.54.0 + debug: 4.4.3 + minimatch: 9.0.5 + semver: 7.7.3 + tinyglobby: 0.2.15 + ts-api-utils: 2.4.0(typescript@5.9.3) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/utils@8.54.0(eslint@9.39.2)(typescript@5.9.3)': + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@9.39.2) + '@typescript-eslint/scope-manager': 8.54.0 + '@typescript-eslint/types': 8.54.0 + '@typescript-eslint/typescript-estree': 8.54.0(typescript@5.9.3) + eslint: 9.39.2 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/visitor-keys@8.54.0': + dependencies: + '@typescript-eslint/types': 8.54.0 + eslint-visitor-keys: 4.2.1 + + acorn-jsx@5.3.2(acorn@8.15.0): + dependencies: + acorn: 8.15.0 + + acorn@8.15.0: {} + + ajv@6.12.6: + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + argparse@2.0.1: {} + + balanced-match@1.0.2: {} + + brace-expansion@1.1.12: + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + brace-expansion@2.0.2: + dependencies: + balanced-match: 1.0.2 + + callsites@3.1.0: {} + + chalk@4.1.2: + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.4: {} + + concat-map@0.0.1: {} + + cross-spawn@7.0.6: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + debug@4.4.3: + dependencies: + ms: 2.1.3 + + deep-is@0.1.4: {} + + esbuild@0.27.2: + optionalDependencies: + '@esbuild/aix-ppc64': 0.27.2 + '@esbuild/android-arm': 
0.27.2 + '@esbuild/android-arm64': 0.27.2 + '@esbuild/android-x64': 0.27.2 + '@esbuild/darwin-arm64': 0.27.2 + '@esbuild/darwin-x64': 0.27.2 + '@esbuild/freebsd-arm64': 0.27.2 + '@esbuild/freebsd-x64': 0.27.2 + '@esbuild/linux-arm': 0.27.2 + '@esbuild/linux-arm64': 0.27.2 + '@esbuild/linux-ia32': 0.27.2 + '@esbuild/linux-loong64': 0.27.2 + '@esbuild/linux-mips64el': 0.27.2 + '@esbuild/linux-ppc64': 0.27.2 + '@esbuild/linux-riscv64': 0.27.2 + '@esbuild/linux-s390x': 0.27.2 + '@esbuild/linux-x64': 0.27.2 + '@esbuild/netbsd-arm64': 0.27.2 + '@esbuild/netbsd-x64': 0.27.2 + '@esbuild/openbsd-arm64': 0.27.2 + '@esbuild/openbsd-x64': 0.27.2 + '@esbuild/openharmony-arm64': 0.27.2 + '@esbuild/sunos-x64': 0.27.2 + '@esbuild/win32-arm64': 0.27.2 + '@esbuild/win32-ia32': 0.27.2 + '@esbuild/win32-x64': 0.27.2 + + escape-string-regexp@4.0.0: {} + + eslint-scope@8.4.0: + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + + eslint-visitor-keys@3.4.3: {} + + eslint-visitor-keys@4.2.1: {} + + eslint@9.39.2: + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@9.39.2) + '@eslint-community/regexpp': 4.12.2 + '@eslint/config-array': 0.21.1 + '@eslint/config-helpers': 0.4.2 + '@eslint/core': 0.17.0 + '@eslint/eslintrc': 3.3.3 + '@eslint/js': 9.39.2 + '@eslint/plugin-kit': 0.4.1 + '@humanfs/node': 0.16.7 + '@humanwhocodes/module-importer': 1.0.1 + '@humanwhocodes/retry': 0.4.3 + '@types/estree': 1.0.8 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.6 + debug: 4.4.3 + escape-string-regexp: 4.0.0 + eslint-scope: 8.4.0 + eslint-visitor-keys: 4.2.1 + espree: 10.4.0 + esquery: 1.7.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 8.0.0 + find-up: 5.0.0 + glob-parent: 6.0.2 + ignore: 5.3.2 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + json-stable-stringify-without-jsonify: 1.0.1 + lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.4 + transitivePeerDependencies: + - supports-color + + espree@10.4.0: + dependencies: + acorn: 8.15.0 + 
acorn-jsx: 5.3.2(acorn@8.15.0) + eslint-visitor-keys: 4.2.1 + + esquery@1.7.0: + dependencies: + estraverse: 5.3.0 + + esrecurse@4.3.0: + dependencies: + estraverse: 5.3.0 + + estraverse@5.3.0: {} + + esutils@2.0.3: {} + + fast-deep-equal@3.1.3: {} + + fast-json-stable-stringify@2.1.0: {} + + fast-levenshtein@2.0.6: {} + + fdir@6.5.0(picomatch@4.0.3): + optionalDependencies: + picomatch: 4.0.3 + + file-entry-cache@8.0.0: + dependencies: + flat-cache: 4.0.1 + + find-up@5.0.0: + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + + flat-cache@4.0.1: + dependencies: + flatted: 3.3.3 + keyv: 4.5.4 + + flatted@3.3.3: {} + + fsevents@2.3.3: + optional: true + + get-tsconfig@4.13.1: + dependencies: + resolve-pkg-maps: 1.0.0 + + glob-parent@6.0.2: + dependencies: + is-glob: 4.0.3 + + globals@14.0.0: {} + + has-flag@4.0.0: {} + + ignore@5.3.2: {} + + ignore@7.0.5: {} + + import-fresh@3.3.1: + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + + imurmurhash@0.1.4: {} + + is-extglob@2.1.1: {} + + is-glob@4.0.3: + dependencies: + is-extglob: 2.1.1 + + isexe@2.0.0: {} + + js-yaml@4.1.1: + dependencies: + argparse: 2.0.1 + + json-buffer@3.0.1: {} + + json-schema-traverse@0.4.1: {} + + json-stable-stringify-without-jsonify@1.0.1: {} + + keyv@4.5.4: + dependencies: + json-buffer: 3.0.1 + + levn@0.4.1: + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + + locate-path@6.0.0: + dependencies: + p-locate: 5.0.0 + + lodash.merge@4.6.2: {} + + minimatch@3.1.2: + dependencies: + brace-expansion: 1.1.12 + + minimatch@9.0.5: + dependencies: + brace-expansion: 2.0.2 + + ms@2.1.3: {} + + natural-compare@1.4.0: {} + + optionator@0.9.4: + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + word-wrap: 1.2.5 + + p-limit@3.1.0: + dependencies: + yocto-queue: 0.1.0 + + p-locate@5.0.0: + dependencies: + p-limit: 3.1.0 + + parent-module@1.0.1: + dependencies: + callsites: 3.1.0 + + path-exists@4.0.0: {} + + 
path-key@3.1.1: {} + + picomatch@4.0.3: {} + + prelude-ls@1.2.1: {} + + prettier@3.8.1: {} + + punycode@2.3.1: {} + + resolve-from@4.0.0: {} + + resolve-pkg-maps@1.0.0: {} + + semver@7.7.3: {} + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + strip-json-comments@3.1.1: {} + + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + + tinyglobby@0.2.15: + dependencies: + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + + ts-api-utils@2.4.0(typescript@5.9.3): + dependencies: + typescript: 5.9.3 + + tsx@4.21.0: + dependencies: + esbuild: 0.27.2 + get-tsconfig: 4.13.1 + optionalDependencies: + fsevents: 2.3.3 + + type-check@0.4.0: + dependencies: + prelude-ls: 1.2.1 + + typescript@5.9.3: {} + + uri-js@4.4.1: + dependencies: + punycode: 2.3.1 + + uuid@11.1.0: {} + + which@2.0.2: + dependencies: + isexe: 2.0.0 + + word-wrap@1.2.5: {} + + yocto-queue@0.1.0: {} + + zod@4.3.6: {} diff --git a/bubus-ts/prettier.config.js b/bubus-ts/prettier.config.js new file mode 100644 index 0000000..98b89f5 --- /dev/null +++ b/bubus-ts/prettier.config.js @@ -0,0 +1,8 @@ +const config = { + semi: false, + singleQuote: true, + trailingComma: 'es5', + printWidth: 140, +} + +export default config diff --git a/bubus-ts/src/async_context.ts b/bubus-ts/src/async_context.ts new file mode 100644 index 0000000..c2ed50a --- /dev/null +++ b/bubus-ts/src/async_context.ts @@ -0,0 +1,53 @@ +declare const process: { versions?: { node?: string } } | undefined + +type AsyncLocalStorageLike = { + getStore(): unknown + run(store: unknown, callback: () => T): T + enterWith?(store: unknown): void +} + +export type { AsyncLocalStorageLike } + +// Cache the AsyncLocalStorage constructor so multiple modules can create separate instances. 
+let _AsyncLocalStorageClass: (new () => AsyncLocalStorageLike) | null = null + +const is_node = typeof process !== 'undefined' && typeof process.versions !== 'undefined' && typeof process.versions.node === 'string' + +if (is_node) { + try { + const importer = new Function('specifier', 'return import(specifier)') as ( + specifier: string + ) => Promise<{ AsyncLocalStorage?: new () => AsyncLocalStorageLike }> + const mod = await importer('node:async_hooks') + if (mod?.AsyncLocalStorage) { + _AsyncLocalStorageClass = mod.AsyncLocalStorage + } + } catch { + _AsyncLocalStorageClass = null + } +} + +/** Create a new AsyncLocalStorage instance, or null if unavailable (e.g. in browsers). */ +export const createAsyncLocalStorage = (): AsyncLocalStorageLike | null => { + if (!_AsyncLocalStorageClass) return null + return new _AsyncLocalStorageClass() +} + +// The primary AsyncLocalStorage instance used for event dispatch context propagation. +export let async_local_storage: AsyncLocalStorageLike | null = _AsyncLocalStorageClass ? new _AsyncLocalStorageClass() : null + +export const captureAsyncContext = (): unknown | null => { + if (!async_local_storage) { + return null + } + return async_local_storage.getStore() ?? null +} + +export const runWithAsyncContext = (context: unknown | null, fn: () => T): T => { + if (!async_local_storage) { + return fn() + } + return async_local_storage.run(context ?? 
undefined, fn) +} + +export const hasAsyncLocalStorage = (): boolean => async_local_storage !== null diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts new file mode 100644 index 0000000..b333b89 --- /dev/null +++ b/bubus-ts/src/base_event.ts @@ -0,0 +1,482 @@ +import { z } from 'zod' +import { v7 as uuidv7 } from 'uuid' + +import type { EventBus } from './event_bus.js' +import { EventResult } from './event_result.js' +import type { ConcurrencyMode, Deferred } from './lock_manager.js' +import { CONCURRENCY_MODES, withResolvers } from './lock_manager.js' +import { extractZodShape, getStringTypeName, isZodSchema, toJsonSchema } from './types.js' + +export const BaseEventSchema = z + .object({ + event_id: z.string().uuid(), + event_created_at: z.string().datetime(), + event_created_ts: z.number().optional(), + event_type: z.string(), + event_timeout: z.number().positive().nullable(), + event_parent_id: z.string().uuid().optional(), + event_path: z.array(z.string()).optional(), + event_result_type: z.string().optional(), + event_result_schema: z.unknown().optional(), + event_emitted_by_handler_id: z.string().uuid().optional(), + event_pending_bus_count: z.number().nonnegative().optional(), + event_status: z.enum(['pending', 'started', 'completed']).optional(), + event_started_at: z.string().datetime().optional(), + event_started_ts: z.number().optional(), + event_completed_at: z.string().datetime().optional(), + event_completed_ts: z.number().optional(), + event_results: z.array(z.unknown()).optional(), + event_concurrency: z.enum(CONCURRENCY_MODES).optional(), + event_handler_concurrency: z.enum(CONCURRENCY_MODES).optional(), + }) + .loose() + +export type BaseEventData = z.infer +type BaseEventFields = Pick< + BaseEventData, + | 'event_id' + | 'event_created_at' + | 'event_created_ts' + | 'event_type' + | 'event_timeout' + | 'event_parent_id' + | 'event_path' + | 'event_result_type' + | 'event_result_schema' + | 'event_emitted_by_handler_id' + | 
'event_pending_bus_count' + | 'event_status' + | 'event_started_at' + | 'event_started_ts' + | 'event_completed_at' + | 'event_completed_ts' + | 'event_results' + | 'event_concurrency' + | 'event_handler_concurrency' +> + +export type BaseEventInit> = TFields & Partial + +type BaseEventSchemaShape = typeof BaseEventSchema.shape + +export type EventSchema = z.ZodObject +type EventPayload = z.infer> + +type EventInput = z.input> +export type EventInit = Omit, keyof BaseEventFields> & Partial + +type EventWithResult = BaseEvent & { __event_result_type__?: TResult } + +type ResultTypeFromShape = TShape extends { event_result_schema: infer S } + ? S extends z.ZodTypeAny + ? z.infer + : unknown + : unknown + +export type EventFactory = { + (data: EventInit): EventWithResult & EventPayload + new (data: EventInit): EventWithResult & EventPayload + schema: EventSchema + event_type?: string + event_result_schema?: z.ZodTypeAny + event_result_type?: string + fromJSON?: (data: unknown) => EventWithResult & EventPayload +} + +type ZodShapeFrom> = { + [K in keyof TShape as K extends 'event_result_schema' | 'event_result_type' | 'event_result_schema_json' + ? never + : TShape[K] extends z.ZodTypeAny + ? K + : never]: Extract +} + +export class BaseEvent { + // event metadata fields + event_id!: string // unique uuidv7 identifier for the event + event_created_at!: string // ISO datetime string version of event_created_at + event_created_ts!: number // nanosecond monotonic version of event_created_at + event_type!: string // should match the class name of the event, e.g. 
BaseEvent.extend("MyEvent").event_type === "MyEvent" + event_timeout!: number | null // maximum time in seconds that the event is allowed to run before it is aborted + event_parent_id?: string // id of the parent event that triggered this event, if this event was emitted during handling of another event + event_path!: string[] // list of bus names that the event has been dispatched to, including the current bus + event_result_schema?: z.ZodTypeAny // optional zod schema to enforce the shape of return values from handlers + event_result_type?: string // optional string identifier of the type of the return values from handlers, to make it easier to reference common shapes across networkboundaries e.g. ScreenshotEventResultType + event_results!: Map> // map of handler ids to EventResult objects for the event + event_emitted_by_handler_id?: string // if event was emitted inside a handler while it was running, this will be set to the enclosing handler's handler id + event_pending_bus_count!: number // number of buses that have accepted this event and not yet finished processing or removed it from their queues (for queue-jump processing) + event_status!: 'pending' | 'started' | 'completed' // processing status of the event as a whole, no separate 'error' state because events can not error, only individual handlers can + event_started_at?: string // ISO datetime string version of event_started_ts + event_started_ts?: number // nanosecond monotonic version of event_started_at + event_completed_at?: string // ISO datetime string version of event_completed_ts + event_completed_ts?: number // nanosecond monotonic version of event_completed_at + event_concurrency?: ConcurrencyMode // concurrency mode for the event as a whole in relation to other events + event_handler_concurrency?: ConcurrencyMode // concurrency mode for the handlers within the event + + static event_type?: string // class name of the event, e.g. 
BaseEvent.extend("MyEvent").event_type === "MyEvent" + static schema = BaseEventSchema // zod schema for the event data fields, used to parse and validate event data when creating a new event + + // internal runtime state + bus?: EventBus // shortcut to the bus that dispatched this event, for event.bus.dispatch(event) auto-child tracking via proxy wrapping + _event_original?: BaseEvent // underlying event object that was dispatched, if this is a bus-scoped proxy wrapping it + _event_dispatch_context?: unknown | null // captured AsyncLocalStorage context at dispatch site, used to restore that context when running handlers + + _event_done_signal: Deferred | null + + constructor(data: BaseEventInit> = {}) { + const ctor = this.constructor as typeof BaseEvent & { + event_result_schema?: z.ZodTypeAny + event_result_type?: string + } + const event_type = data.event_type ?? ctor.event_type ?? ctor.name + const event_result_schema = (data.event_result_schema ?? ctor.event_result_schema) as z.ZodTypeAny | undefined + const event_result_type = data.event_result_type ?? ctor.event_result_type ?? getStringTypeName(event_result_schema) + const event_id = data.event_id ?? uuidv7() + const { isostring: default_event_created_at, ts: event_created_ts } = BaseEvent.nextTimestamp() + const event_created_at = data.event_created_at ?? default_event_created_at + const event_timeout = data.event_timeout ?? null + + const base_data = { + ...data, + event_id, + event_created_at, + event_type, + event_timeout, + event_result_schema, + event_result_type, + } + + const schema = ctor.schema ?? BaseEventSchema + const parsed = schema.parse(base_data) as BaseEventData & Record + + Object.assign(this, parsed) + + const parsed_path = (parsed as { event_path?: string[] }).event_path + this.event_path = Array.isArray(parsed_path) ? 
[...parsed_path] : [] + + // load event results from potentially raw objects from JSON to proper EventResult objects + this.event_results = hydrateEventResults(this, (parsed as { event_results?: unknown }).event_results) + this.event_pending_bus_count = + typeof (parsed as { event_pending_bus_count?: unknown }).event_pending_bus_count === 'number' + ? Math.max(0, Number((parsed as { event_pending_bus_count?: number }).event_pending_bus_count)) + : 0 + const parsed_status = (parsed as { event_status?: unknown }).event_status + this.event_status = + parsed_status === 'pending' || parsed_status === 'started' || parsed_status === 'completed' ? parsed_status : 'pending' + + this.event_started_at = + typeof (parsed as { event_started_at?: unknown }).event_started_at === 'string' + ? (parsed as { event_started_at: string }).event_started_at + : undefined + this.event_started_ts = + typeof (parsed as { event_started_ts?: unknown }).event_started_ts === 'number' + ? (parsed as { event_started_ts: number }).event_started_ts + : undefined + this.event_completed_at = + typeof (parsed as { event_completed_at?: unknown }).event_completed_at === 'string' + ? (parsed as { event_completed_at: string }).event_completed_at + : undefined + this.event_completed_ts = + typeof (parsed as { event_completed_ts?: unknown }).event_completed_ts === 'number' + ? (parsed as { event_completed_ts: number }).event_completed_ts + : undefined + this.event_emitted_by_handler_id = + typeof (parsed as { event_emitted_by_handler_id?: unknown }).event_emitted_by_handler_id === 'string' + ? (parsed as { event_emitted_by_handler_id: string }).event_emitted_by_handler_id + : undefined + + this.event_result_schema = event_result_schema + this.event_result_type = event_result_type + this.event_created_ts = + typeof (parsed as { event_created_ts?: unknown }).event_created_ts === 'number' + ? 
(parsed as { event_created_ts: number }).event_created_ts + : event_created_ts + + this._event_done_signal = null + this._event_dispatch_context = undefined + } + + // "MyEvent#a48f" + toString(): string { + return `${this.event_type}#${this.event_id.slice(-4)}` + } + + // get the next monotonic timestamp for global ordering of all operations + static nextTimestamp(): { date: Date; isostring: string; ts: number } { + const ts = performance.now() + const date = new Date(performance.timeOrigin + ts) + return { date, isostring: date.toISOString(), ts } + } + + // main entry point for users to define their own event types + // BaseEvent.extend("MyEvent", { some_custom_field: z.string(), event_result_schema: z.string(), event_timeout: 25, ... }) -> MyEvent + static extend(event_type: string, shape?: TShape): EventFactory> + static extend>( + event_type: string, + shape?: TShape + ): EventFactory, ResultTypeFromShape> + static extend>( + event_type: string, + shape: TShape = {} as TShape + ): EventFactory, ResultTypeFromShape> { + const raw_shape = shape as Record + + const event_result_schema = isZodSchema(raw_shape.event_result_schema) ? (raw_shape.event_result_schema as z.ZodTypeAny) : undefined + const explicit_event_result_type = typeof raw_shape.event_result_type === 'string' ? raw_shape.event_result_type : undefined + const event_result_type = explicit_event_result_type ?? 
getStringTypeName(event_result_schema) + + const zod_shape = extractZodShape(raw_shape) + const full_schema = BaseEventSchema.extend(zod_shape) + + // create a new event class that extends BaseEvent and adds the custom fields + class ExtendedEvent extends BaseEvent { + static schema = full_schema as unknown as typeof BaseEvent.schema + static event_type = event_type + static event_result_schema = event_result_schema + static event_result_type = event_result_type + + constructor(data: EventInit>) { + super(data as BaseEventInit>) + } + } + + type FactoryResult = EventWithResult> & EventPayload> + + function EventFactory(data: EventInit>): FactoryResult { + return new ExtendedEvent(data) as FactoryResult + } + + EventFactory.schema = full_schema as EventSchema> + EventFactory.event_type = event_type + EventFactory.event_result_schema = event_result_schema + EventFactory.event_result_type = event_result_type + EventFactory.fromJSON = (data: unknown) => (ExtendedEvent.fromJSON as (data: unknown) => FactoryResult)(data) + EventFactory.prototype = ExtendedEvent.prototype + ;(EventFactory as unknown as { class: typeof ExtendedEvent }).class = ExtendedEvent + + return EventFactory as unknown as EventFactory, ResultTypeFromShape> + } + + // parse raw event data into a new event object + static parse(this: T, data: unknown): InstanceType { + const schema = this.schema ?? 
BaseEventSchema + const parsed = schema.parse(data) + return new this(parsed) as InstanceType + } + + static fromJSON(this: T, data: unknown): InstanceType { + if (!data || typeof data !== 'object') { + return this.parse(data) + } + const record = { ...(data as Record) } + if (record.event_result_schema && !isZodSchema(record.event_result_schema)) { + const zod_any = z as unknown as { fromJSONSchema?: (schema: unknown) => z.ZodTypeAny } + if (typeof zod_any.fromJSONSchema === 'function') { + record.event_result_schema = zod_any.fromJSONSchema(record.event_result_schema) + } + } + return new this(record as BaseEventInit>) as InstanceType + } + + toJSON(): BaseEventData { + return { + event_id: this.event_id, + event_created_at: this.event_created_at, + event_created_ts: this.event_created_ts, + event_type: this.event_type, + event_timeout: this.event_timeout, + event_parent_id: this.event_parent_id, + event_path: this.event_path, + event_result_type: this.event_result_type, + event_emitted_by_handler_id: this.event_emitted_by_handler_id, + event_pending_bus_count: this.event_pending_bus_count, + event_status: this.event_status, + event_started_at: this.event_started_at, + event_started_ts: this.event_started_ts, + event_completed_at: this.event_completed_at, + event_completed_ts: this.event_completed_ts, + event_results: Array.from(this.event_results.values()).map((result) => result.toJSON()), + event_concurrency: this.event_concurrency, + event_handler_concurrency: this.event_handler_concurrency, + event_result_schema: this.event_result_schema ? toJsonSchema(this.event_result_schema) : this.event_result_schema, + } + } + + // Get parent event object from event_parent_id (checks across all busses) + get event_parent(): BaseEvent | undefined { + const original = this._event_original ?? this + const parent_id = original.event_parent_id + if (!parent_id) { + return undefined + } + return original.bus?.findEventById(parent_id) ?? 
undefined + } + + // get all direct children of this event + get event_children(): BaseEvent[] { + const children: BaseEvent[] = [] + const seen = new Set() + for (const result of this.event_results.values()) { + for (const child of result.event_children) { + if (!seen.has(child.event_id)) { + seen.add(child.event_id) + children.push(child) + } + } + } + return children + } + + // get all children grandchildren etc. recursively + get event_descendants(): BaseEvent[] { + const descendants: BaseEvent[] = [] + const visited = new Set() + const root_id = this.event_id + const stack = [...this.event_children] + + while (stack.length > 0) { + const child = stack.pop() + if (!child) { + continue + } + const child_id = child.event_id + if (child_id === root_id) { + continue + } + if (visited.has(child_id)) { + continue + } + visited.add(child_id) + descendants.push(child) + if (child.event_children.length > 0) { + stack.push(...child.event_children) + } + } + + return descendants + } + + // awaitable that triggers immediate (queue-jump) processing of the event on all buses where it is queued + // use event.waitForCompletion() or event.finished() to wait for the event to be processed in normal queue order + done(): Promise { + if (!this.bus) { + return Promise.reject(new Error('event has no bus attached')) + } + if (this.event_status === 'completed') { + return Promise.resolve(this) + } + // Always delegate to processEventImmediately β€” it walks up the parent event tree + // to determine whether we're inside a handler (works cross-bus). If no + // ancestor handler is in-flight, it falls back to waitForCompletion(). 
+ const runner_bus = this.bus as { + processEventImmediately: (event: BaseEvent) => Promise + } + return runner_bus.processEventImmediately(this) as Promise + } + + // clearer alias for done() to indicate that the event will be processed immediately + // await bus.dispatch(event).immediate() is less ambiguous than await event.done() + immediate(): Promise { + return this.done() + } + + // awaitable that waits for the event to be processed in normal queue order by the runloop + waitForCompletion(): Promise { + if (this.event_status === 'completed') { + return Promise.resolve(this) + } + this._notifyDoneListeners() + return this._event_done_signal!.promise + } + + // convenience alias for await event.waitForCompletion() + finished(): Promise { + return this.waitForCompletion() + } + + markStarted(): void { + if (this.event_status !== 'pending') { + return + } + this.event_status = 'started' + const { isostring: event_started_at, ts: event_started_ts } = BaseEvent.nextTimestamp() + this.event_started_at = event_started_at + this.event_started_ts = event_started_ts + } + + markCompleted(force: boolean = true): void { + if (this.event_status === 'completed') { + return + } + if (!force) { + if (this.event_pending_bus_count > 0) { + return + } + if (!this.eventAreAllChildrenComplete()) { + return + } + } + this.event_status = 'completed' + const { isostring: event_completed_at, ts: event_completed_ts } = BaseEvent.nextTimestamp() + this.event_completed_at = event_completed_at + this.event_completed_ts = event_completed_ts + this._event_dispatch_context = null + this._notifyDoneListeners() + this._event_done_signal!.resolve(this) + this._event_done_signal = null + } + + get event_errors(): unknown[] { + const errors: unknown[] = [] + for (const result of this.event_results.values()) { + if (result.error !== undefined) { + errors.push(result.error) + } + } + return errors + } + + eventAreAllChildrenComplete(): boolean { + for (const descendant of this.event_descendants) { 
+ if (descendant.event_status !== 'completed') { + return false + } + } + return true + } + + _notifyDoneListeners(): void { + if (this._event_done_signal) { + return + } + this._event_done_signal = withResolvers() + } + + // Break internal reference chains so a completed event can be GC'd when + // evicted from event_history. Called by EventBus.trimHistory(). + _gc(): void { + this._event_done_signal = null + this._event_dispatch_context = null + this.bus = undefined + for (const result of this.event_results.values()) { + result.event_children = [] + } + this.event_results.clear() + } +} + +const hydrateEventResults = (event: TEvent, raw_event_results: unknown): Map> => { + const event_results = new Map>() + if (!Array.isArray(raw_event_results)) { + return event_results + } + for (const item of raw_event_results) { + const result = EventResult.fromJSON(event, item) + if (!result) { + continue + } + const map_key = typeof result.handler_id === 'string' && result.handler_id.length > 0 ? 
result.handler_id : result.id + event_results.set(map_key, result) + } + return event_results +} diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts new file mode 100644 index 0000000..9e4409a --- /dev/null +++ b/bubus-ts/src/event_bus.ts @@ -0,0 +1,1184 @@ +import { BaseEvent } from './base_event.js' +import { EventResult } from './event_result.js' +import { captureAsyncContext, runWithAsyncContext } from './async_context.js' +import { AsyncSemaphore, type ConcurrencyMode, HandlerLock, LockManager, runWithSemaphore, withResolvers } from './lock_manager.js' +import { + EventHandlerAbortedError, + EventHandlerCancelledError, + EventHandlerTimeoutError, + EventHandlerResultSchemaError, + EventHandler, +} from './event_handler.js' +import { logTree } from './logging.js' + +import type { EventClass, EventHandlerFunction, EventKey, FindOptions, UntypedEventHandlerFunction } from './types.js' + +type FindWaiter = { + // similar to a handler, except its for .find() calls + // needs to be different because it's resolved on dispatch not event processing time + // also is ephemeral, gets unregistered the moment it resolves and + // doesnt show up in event processing tree, doesn't block runloop, etc. 
+ event_key: EventKey + matches: (event: BaseEvent) => boolean + resolve: (event: BaseEvent) => void + timeout_id?: ReturnType +} + +type EventBusOptions = { + max_history_size?: number | null + event_concurrency?: ConcurrencyMode + event_handler_concurrency?: ConcurrencyMode + event_timeout?: number | null // default handler timeout in seconds, applied when event.event_timeout is undefined + event_handler_slow_timeout?: number | null // threshold before a warning is logged about slow handler execution + event_slow_timeout?: number | null // threshold before a warning is logged about slow event processing +} + +// Global registry of all EventBus instances to allow for cross-bus coordination when global-serial concurrency mode is used +class GlobalEventBusInstanceRegistry { + private _refs = new Set>() + private _lookup = new WeakMap>() + private _gc = + typeof FinalizationRegistry !== 'undefined' + ? new FinalizationRegistry>((ref) => { + this._refs.delete(ref) + }) + : null + + add(bus: EventBus): void { + const ref = new WeakRef(bus) + this._refs.add(ref) + this._lookup.set(bus, ref) + this._gc?.register(bus, ref, bus) + } + + delete(bus: EventBus): void { + const ref = this._lookup.get(bus) + if (!ref) return + this._refs.delete(ref) + this._lookup.delete(bus) + this._gc?.unregister(bus) + } + + has(bus: EventBus): boolean { + return this._lookup.get(bus)?.deref() !== undefined + } + + get size(): number { + let n = 0 + for (const ref of this._refs) ref.deref() ? 
n++ : this._refs.delete(ref) + return n + } + + *[Symbol.iterator](): Iterator { + for (const ref of this._refs) { + const bus = ref.deref() + if (bus) yield bus + else this._refs.delete(ref) + } + } + + // find an event by its id across all buses + findEventById(event_id: string): BaseEvent | null { + for (const bus of this) { + const event = bus.event_history.get(event_id) + if (event) { + return event + } + } + return null + } +} + +export class EventBus { + static _all_instances = new GlobalEventBusInstanceRegistry() + + name: string // name of the event bus, recommended to include the word "Bus" in the name for clarity in logs + + // configuration options + max_history_size: number | null // max number of completed events kept in log, set to null for unlimited history + event_concurrency_default: ConcurrencyMode + event_handler_concurrency_default: ConcurrencyMode + event_timeout_default: number | null + event_handler_slow_timeout: number | null + event_slow_timeout: number | null + + // public runtime state + handlers: Map // map of handler uuidv5 ids to EventHandler objects + event_history: Map // map of event uuidv7 ids to processed BaseEvent objects + + // internal runtime state + pending_event_queue: BaseEvent[] // queue of events that have been dispatched to the bus but not yet processed + in_flight_event_ids: Set // set of event ids that are currently being processed by the bus + runloop_running: boolean + locks: LockManager + find_waiters: Set // set of FindWaiter objects that are waiting for a matching future event + + constructor(name: string = 'EventBus', options: EventBusOptions = {}) { + this.name = name + + // set configuration options + this.max_history_size = options.max_history_size === undefined ? 100 : options.max_history_size + this.event_concurrency_default = options.event_concurrency ?? 'bus-serial' + this.event_handler_concurrency_default = options.event_handler_concurrency ?? 
'bus-serial' + this.event_timeout_default = options.event_timeout === undefined ? 60 : options.event_timeout + this.event_handler_slow_timeout = options.event_handler_slow_timeout === undefined ? 30 : options.event_handler_slow_timeout + this.event_slow_timeout = options.event_slow_timeout === undefined ? 300 : options.event_slow_timeout + + // initialize runtime state + this.handlers = new Map() + this.event_history = new Map() + this.pending_event_queue = [] + this.in_flight_event_ids = new Set() + this.runloop_running = false + this.locks = new LockManager(this) + this.find_waiters = new Set() + + EventBus._all_instances.add(this) + + this.dispatch = this.dispatch.bind(this) + this.emit = this.emit.bind(this) + } + + toString(): string { + if (this.name.toLowerCase().includes('bus')) { + return `${this.name}` + } + return `EventBus(${this.name})` // for clarity that its a bus if bus is not in the name + } + + // destroy the event bus and all its state to allow for garbage collection + destroy(): void { + EventBus._all_instances.delete(this) + this.handlers.clear() + for (const event of this.event_history.values()) { + event._gc() + } + this.event_history.clear() + this.pending_event_queue.length = 0 + this.in_flight_event_ids.clear() + this.find_waiters.clear() + this.locks.clear() + } + + on( + event_key: EventClass, + handler: EventHandlerFunction, + options?: { event_handler_concurrency?: ConcurrencyMode; handler_timeout?: number | null } + ): EventHandler + on( + event_key: string | '*', + handler: UntypedEventHandlerFunction, + options?: { event_handler_concurrency?: ConcurrencyMode; handler_timeout?: number | null } + ): EventHandler + on( + event_key: EventKey | '*', + handler: EventHandlerFunction | UntypedEventHandlerFunction, + options: { event_handler_concurrency?: ConcurrencyMode; handler_timeout?: number | null } = {} + ): EventHandler { + const normalized_key = this.normalizeEventKey(event_key) // get string event_type or '*' + const handler_name = 
handler.name || 'anonymous' // get handler function name or 'anonymous' if the handler is an anonymous/arrow function + const { isostring: handler_registered_at, ts: handler_registered_ts } = BaseEvent.nextTimestamp() + const handler_timeout = options.handler_timeout ?? this.event_timeout_default + const handler_entry = new EventHandler({ + handler: handler as EventHandlerFunction, + handler_name, + handler_timeout, + event_handler_concurrency: options.event_handler_concurrency, + handler_registered_at, + handler_registered_ts, + event_key: normalized_key, + eventbus_name: this.name, + }) + + this.handlers.set(handler_entry.id, handler_entry) + return handler_entry + } + + off(event_key: EventKey | '*', handler?: EventHandlerFunction | string | EventHandler): void { + const normalized_key = this.normalizeEventKey(event_key) + if (typeof handler === 'object' && handler instanceof EventHandler && handler.id !== undefined) { + handler = handler.id + } + const match_by_id = typeof handler === 'string' + for (const entry of this.handlers.values()) { + if (entry.event_key !== normalized_key) { + continue + } + const handler_id = entry.id + if (handler === undefined || (match_by_id ? handler_id === handler : entry.handler === (handler as EventHandlerFunction))) { + this.handlers.delete(handler_id) + } + } + } + + dispatch(event: T, _event_key?: EventKey): T { + const original_event = event._event_original ?? 
event // if event is a bus-scoped proxy already, get the original underlying event object
    // NOTE(review): generic type arguments (e.g. on Promise/Set) appear stripped in this extraction — confirm against the repo.
    if (!original_event.bus) {
      // if we are the first bus to dispatch this event, set the bus property on the original event object
      original_event.bus = this
    }
    if (!Array.isArray(original_event.event_path)) {
      original_event.event_path = []
    }
    if (original_event._event_dispatch_context === undefined) {
      // when used in fastify/nextjs/other contexts with tracing based on AsyncLocalStorage in node
      // we want to capture the context at the dispatch site and use it when running handlers
      // because events may be handled async in a separate context than the dispatch site
      original_event._event_dispatch_context = captureAsyncContext()
    }
    if (original_event.event_timeout === null) {
      // fall back to the bus-wide default timeout when the event does not carry its own
      original_event.event_timeout = this.event_timeout_default
    }

    // de-duplicate: if this bus already saw or fully processed the event, just return a scoped proxy
    if (original_event.event_path.includes(this.name) || this.hasProcessedEvent(original_event)) {
      return this.getEventProxyScopedToThisBus(original_event) as T
    }

    if (!original_event.event_path.includes(this.name)) {
      original_event.event_path.push(this.name)
    }

    // link this event to the parent handler's EventResult when dispatched from inside a handler
    if (original_event.event_parent_id && original_event.event_emitted_by_handler_id) {
      const parent_result = original_event.event_parent?.event_results.get(original_event.event_emitted_by_handler_id)
      if (parent_result) {
        parent_result.linkEmittedChildEvent(original_event)
      }
    }

    this.event_history.set(original_event.event_id, original_event)
    this.trimHistory()

    original_event.event_pending_bus_count += 1
    this.pending_event_queue.push(original_event)
    this.startRunloop()

    return this.getEventProxyScopedToThisBus(original_event) as T
  }

  // alias for dispatch
  emit(event: T, event_key?: EventKey): T {
    return this.dispatch(event, event_key)
  }

  // find a recent event or wait for a future event that matches some criteria
  find(event_key: EventKey, options?: FindOptions): Promise
  find(event_key: EventKey, where: (event: T) => boolean, options?: FindOptions): Promise
  async find(
    event_key: EventKey,
    where_or_options: ((event: T) => boolean) | FindOptions = {},
    maybe_options: FindOptions = {}
  ): Promise {
    // second positional argument may be either a predicate or the options object
    const where = typeof where_or_options === 'function' ? where_or_options : () => true
    const options = typeof where_or_options === 'function' ? maybe_options : where_or_options

    // past/future may each be: true (unbounded), false (disabled), or a number of seconds
    const past = options.past ?? true
    const future = options.future ?? true
    const child_of = options.child_of ?? null

    if (past === false && future === false) {
      return null
    }

    // predicate combining the event-key match, the caller's filter, and the optional ancestry constraint
    const matches = (event: BaseEvent): boolean => {
      if (!this.eventMatchesKey(event, event_key)) {
        return false
      }
      if (!where(event as T)) {
        return false
      }
      if (child_of && !this.eventIsChildOf(event, child_of)) {
        return false
      }
      return true
    }

    // find an event in the history that matches the criteria
    if (past !== false || future !== false) {
      const now_ms = performance.timeOrigin + performance.now()
      // numeric `past` means "completed within the last N seconds"
      const cutoff_ms = past === true ? null : now_ms - Math.max(0, Number(past)) * 1000

      // scan newest-first so the most recent match wins
      const history_values = Array.from(this.event_history.values())
      for (let i = history_values.length - 1; i >= 0; i -= 1) {
        const event = history_values[i]
        if (!matches(event)) {
          continue
        }
        if (event.event_status === 'completed') {
          if (past === false) {
            continue
          }
          if (cutoff_ms !== null && Date.parse(event.event_created_at) < cutoff_ms) {
            continue
          }
          return this.getEventProxyScopedToThisBus(event) as T
        }
        // not-yet-completed events count as "future" matches
        if (future !== false) {
          return this.getEventProxyScopedToThisBus(event) as T
        }
      }
    }

    // if we are only looking for past events, return null when no match is found
    if (future === false) {
      return null
    }

    // if we are looking for future events, return a promise that resolves when a match is found
    return new Promise((resolve) => {
      const waiter: FindWaiter = {
        event_key,
        matches,
        resolve: (event) => resolve(this.getEventProxyScopedToThisBus(event) as T),
      }

      // numeric `future` means "wait at most N seconds, then resolve null"
      if (future !== true) {
        const timeout_ms = Math.max(0, Number(future)) * 1000
        waiter.timeout_id = setTimeout(() => {
          this.find_waiters.delete(waiter)
          resolve(null)
        }, timeout_ms)
      }

      this.find_waiters.add(waiter)
    })
  }

  // Called when a handler does `await child.done()` — processes the child event
  // immediately ("queue-jump") instead of waiting for the runloop to pick it up.
  //
  // Yield-and-reacquire: if the calling handler holds a handler concurrency semaphore,
  // we temporarily release it so child handlers on the same bus can acquire it
  // (preventing deadlock for bus-serial/global-serial modes). We re-acquire after
  // the child completes so the parent handler can continue with the semaphore held.
  async processEventImmediately(event: T, handler_result?: EventResult): Promise {
    const original_event = event._event_original ?? event
    // Find the parent handler's result: prefer the proxy-provided one (only if
    // the handler is still running), then this bus's stack, then walk up the
    // parent event tree (cross-bus case). If none found, we're not inside a
    // handler and should fall back to waitForCompletion.
    const proxy_result = handler_result?.status === 'started' ? handler_result : undefined
    const currently_active_event_result =
      proxy_result ?? this.locks.getActiveHandlerResult() ?? this.getParentEventResultAcrossAllBusses(original_event) ?? undefined
    if (!currently_active_event_result) {
      // Not inside any handler scope — avoid queue-jump, but if this event is
      // next in line we can process it immediately without waiting on the runloop.
      const queue_index = this.pending_event_queue.indexOf(original_event)
      const can_process_now =
        queue_index === 0 &&
        !this.locks.isPaused() &&
        !this.in_flight_event_ids.has(original_event.event_id) &&
        !this.hasProcessedEvent(original_event)
      if (can_process_now) {
        this.pending_event_queue.shift()
        this.in_flight_event_ids.add(original_event.event_id)
        await this.scheduleEventProcessing(original_event)
        if (original_event.event_status !== 'completed') {
          // other buses may still be processing the same event
          await original_event.waitForCompletion()
        }
        return event
      }
      await original_event.waitForCompletion()
      return event
    }

    // ensure a pause request is set so the runloop pauses and (will resume when the event is completed)
    this.locks.requestRunloopPauseForQueueJumpEvent(currently_active_event_result)
    if (original_event.event_status === 'completed') {
      return event
    }

    // run the jump inside the parent handler's lock when one exists (yield-and-reacquire), otherwise run directly
    const run_queue_jump = currently_active_event_result._lock
      ? (fn: () => Promise) => currently_active_event_result._lock!.runQueueJump(fn)
      : (fn: () => Promise) => fn()
    return await run_queue_jump(async () => {
      if (original_event.event_status === 'started') {
        // already mid-processing somewhere — just drive it to completion across buses
        await this.runImmediatelyAcrossBuses(original_event)
        return event
      }

      // remove from this bus's queue so the runloop won't double-process it
      const index = this.pending_event_queue.indexOf(original_event)
      if (index >= 0) {
        this.pending_event_queue.splice(index, 1)
      }

      await this.runImmediatelyAcrossBuses(original_event)
      return event
    })
  }

  // resolve when this bus (and its lock manager) report fully idle
  async waitUntilIdle(): Promise {
    await this.locks.waitForIdle()
  }

  // Weak idle check: only checks if handlers are idle, doesn't check that the queue is empty
  isIdle(): boolean {
    for (const event of this.event_history.values()) {
      for (const result of event.event_results.values()) {
        // only consider results produced by handlers registered on THIS bus
        if (result.eventbus_name !== this.name) {
          continue
        }
        if (result.status === 'pending' || result.status === 'started') {
          return false
        }
      }
    }
    return true // no handlers are pending or started
  }

  // Stronger idle check: no queued work, no in-flight processing, runloop not
  // active, and no handlers pending/running for this bus.
  isIdleAndQueueEmpty(): boolean {
    // NOTE(review): generic type arguments (e.g. on Promise/Set) appear stripped in this extraction — confirm against the repo.
    return this.pending_event_queue.length === 0 && this.in_flight_event_ids.size === 0 && this.isIdle() && !this.runloop_running
  }

  // true if `event` is a (transitive) descendant of `ancestor`, following event_parent_id links through this bus's history
  eventIsChildOf(event: BaseEvent, ancestor: BaseEvent): boolean {
    // an event is never its own child
    if (event.event_id === ancestor.event_id) {
      return false
    }

    let current_parent_id = event.event_parent_id
    while (current_parent_id) {
      if (current_parent_id === ancestor.event_id) {
        return true
      }
      // chain lookup is limited to this bus's own history; a missing link ends the walk
      const parent = this.event_history.get(current_parent_id)
      if (!parent) {
        return false
      }
      current_parent_id = parent.event_parent_id
    }
    return false
  }

  // inverse convenience wrapper around eventIsChildOf
  eventIsParentOf(parent_event: BaseEvent, child_event: BaseEvent): boolean {
    return this.eventIsChildOf(child_event, parent_event)
  }

  // return a full detailed tree diagram of all events and results on this bus
  logTree(): string {
    return logTree(this)
  }

  // Resolve an event id from this bus first, then across all known buses.
  findEventById(event_id: string): BaseEvent | null {
    return this.event_history.get(event_id) ?? EventBus._all_instances.findEventById(event_id)
  }

  // Walk up the parent event chain to find an in-flight ancestor handler result.
  // Returns the result if found, null otherwise. Used by processEventImmediately to detect
  // cross-bus queue-jump scenarios where the calling handler is on a different bus.
  getParentEventResultAcrossAllBusses(event: BaseEvent): EventResult | null {
    const original = event._event_original ?? event
    let current_parent_id = original.event_parent_id
    let current_handler_id = original.event_emitted_by_handler_id
    while (current_handler_id && current_parent_id) {
      const parent = EventBus._all_instances.findEventById(current_parent_id)
      if (!parent) break
      const handler_result = parent.event_results.get(current_handler_id)
      // only a still-running ('started') ancestor handler counts as an active scope
      if (handler_result && handler_result.status === 'started') return handler_result
      current_parent_id = parent.event_parent_id
      current_handler_id = parent.event_emitted_by_handler_id
    }
    return null
  }

  // Processes a queue-jumped event across all buses that have it dispatched.
  // Called from processEventImmediately after the parent handler's semaphore has been yielded.
  //
  // Event semaphore bypass: the initiating bus (this) always bypasses its event semaphore
  // since we're inside a handler that already holds it. Other buses only bypass if
  // they resolve to the same semaphore instance (i.e. global-serial mode where all
  // buses share LockManager.global_event_semaphore).
  //
  // Handler semaphores are NOT bypassed — child handlers must acquire the handler
  // semaphore normally. This works because processEventImmediately already released the
  // parent's handler semaphore via yield-and-reacquire.
  private async runImmediatelyAcrossBuses(event: BaseEvent): Promise {
    const buses = this.getBusesForImmediateRun(event)
    if (buses.length === 0) {
      // nobody owns it anymore — just wait for whoever is finishing it
      await event.waitForCompletion()
      return
    }

    // pause every involved runloop for the duration of the immediate run
    const pause_releases = buses.map((bus) => bus.locks.requestPause())

    // Determine which event semaphore the initiating bus resolves to, so we can
    // detect when other buses share the same instance (global-serial).
    const initiating_event_semaphore = this.locks.getSemaphoreForEvent(event)

    try {
      for (const bus of buses) {
        // remove from the bus queue so its runloop can't race us on the same event
        const index = bus.pending_event_queue.indexOf(event)
        if (index >= 0) {
          bus.pending_event_queue.splice(index, 1)
        }
        if (bus.hasProcessedEvent(event)) {
          continue
        }
        if (bus.in_flight_event_ids.has(event.event_id)) {
          continue
        }
        bus.in_flight_event_ids.add(event.event_id)

        // Bypass event semaphore on the initiating bus (we're already inside a handler
        // that acquired it). For other buses, only bypass if they resolve to the same
        // semaphore instance (global-serial shares one semaphore across all buses).
        const bus_event_semaphore = bus.locks.getSemaphoreForEvent(event)
        const should_bypass_event_semaphore =
          bus === this || (initiating_event_semaphore !== null && bus_event_semaphore === initiating_event_semaphore)

        await bus.scheduleEventProcessing(event, {
          bypass_event_semaphores: should_bypass_event_semaphore,
        })
      }

      if (event.event_status !== 'completed') {
        await event.waitForCompletion()
      }
    } finally {
      // always resume the paused runloops, even if a handler threw
      for (const release of pause_releases) {
        release()
      }
    }
  }

  // Collects buses that currently "own" this event so queue-jump can run it immediately
  // across all forwarded buses. Called by runImmediatelyAcrossBuses(), which itself is
  // invoked from processEventImmediately (via BaseEvent.done()) when an event is awaited inside
  // a handler. Uses event.event_path ordering to pick candidate buses and filters out
  // buses that haven't seen the event or already processed it.
  private getBusesForImmediateRun(event: BaseEvent): EventBus[] {
    const ordered: EventBus[] = []
    const seen = new Set()

    const event_path = Array.isArray(event.event_path) ? event.event_path : []
    for (const name of event_path) {
      for (const bus of EventBus._all_instances) {
        if (bus.name !== name) {
          continue
        }
        if (!bus.event_history.has(event.event_id)) {
          continue
        }
        if (bus.hasProcessedEvent(event)) {
          continue
        }
        if (!seen.has(bus)) {
          ordered.push(bus)
          seen.add(bus)
        }
      }
    }

    // make sure the initiating bus is always included if it has seen the event
    if (!seen.has(this) && this.event_history.has(event.event_id)) {
      ordered.push(this)
    }

    return ordered
  }

  // start the runloop on the next microtask if it is not already running (idempotent)
  private startRunloop(): void {
    if (this.runloop_running) {
      return
    }
    this.runloop_running = true
    queueMicrotask(() => {
      void this.runloop()
    })
  }

  // schedule the processing of an event on the event bus by its normal runloop
  // but set up the bus to process the given event immediately if it is a queue-jump event
  private async scheduleEventProcessing(
    event: BaseEvent,
    options: {
      bypass_event_semaphores?: boolean
      pre_acquired_semaphore?: AsyncSemaphore | null
    } = {}
  ): Promise {
    try {
      const semaphore = options.bypass_event_semaphores ? null : this.locks.getSemaphoreForEvent(event)
      const pre_acquired_semaphore = options.pre_acquired_semaphore ?? null
      if (pre_acquired_semaphore) {
        // caller (the runloop) already holds the event semaphore — don't acquire again
        await this.processEvent(event)
      } else {
        await runWithSemaphore(semaphore, async () => {
          await this.processEvent(event)
        })
      }
    } finally {
      // release a caller-held semaphore and clear in-flight bookkeeping regardless of outcome
      if (options.pre_acquired_semaphore) {
        options.pre_acquired_semaphore.release()
      }
      this.in_flight_event_ids.delete(event.event_id)
      this.locks.notifyIdleListeners()
    }
  }

  // main drain loop: pops events off pending_event_queue one at a time and schedules their processing
  private async runloop(): Promise {
    for (;;) {
      while (this.pending_event_queue.length > 0) {
        // yield a microtask so dispatches batched in the same tick all land in the queue first
        await Promise.resolve()
        if (this.locks.isPaused()) {
          // a queue-jump is in progress — wait until it resumes us, then re-check the queue
          await this.locks.waitUntilRunloopResumed()
          continue
        }
        const next_event = this.pending_event_queue[0]
        if (!next_event) {
          continue
        }
        const original_event = next_event._event_original ?? next_event
        if (this.hasProcessedEvent(original_event)) {
          this.pending_event_queue.shift()
          continue
        }
        // acquire the event semaphore BEFORE dequeuing so concurrency limits apply to ordering
        let pre_acquired_semaphore: AsyncSemaphore | null = null
        const event_semaphore = this.locks.getSemaphoreForEvent(original_event)
        if (event_semaphore) {
          await event_semaphore.acquire()
          pre_acquired_semaphore = event_semaphore
        }
        this.pending_event_queue.shift()
        if (this.in_flight_event_ids.has(original_event.event_id)) {
          // a queue-jump started this event while we were waiting on the semaphore
          if (pre_acquired_semaphore) {
            pre_acquired_semaphore.release()
          }
          continue
        }
        this.in_flight_event_ids.add(original_event.event_id)
        // fire-and-forget: scheduleEventProcessing releases the pre-acquired semaphore in its finally
        void this.scheduleEventProcessing(original_event, {
          bypass_event_semaphores: true,
          pre_acquired_semaphore,
        })
        await Promise.resolve()
      }
      this.runloop_running = false
      // re-check for events dispatched in the window after the while loop exited
      if (this.pending_event_queue.length > 0) {
        this.startRunloop()
        return
      }
      this.locks.notifyIdleListeners()
      return
    }
  }

  // run all of this bus's handlers for one event and finalize its completion state
  private async processEvent(event: BaseEvent): Promise {
    if (this.hasProcessedEvent(event)) {
      return
    }
    event.markStarted()
    // wake any find() waiters that match this event as soon as it starts
    this.notifyFindListeners(event)

    const slow_event_warning_timer = this.createSlowEventWarningTimer(event)

    try {
      const handler_entries = this.createPendingHandlerResults(event)

      // all handlers for the event run concurrently; per-handler semaphores serialize where configured
      const handler_promises = handler_entries.map((entry) => this.runEventHandler(event, entry.handler, entry.result))
      await Promise.all(handler_promises)

      event.event_pending_bus_count = Math.max(0, event.event_pending_bus_count - 1)
      event.markCompleted(false)
      if (event.event_status === 'completed') {
        this.notifyEventParentsOfCompletion(event)
      }
    } finally {
      if (slow_event_warning_timer) {
        clearTimeout(slow_event_warning_timer)
      }
    }
  }

  // Manually manages the handler concurrency semaphore instead of using runWithSemaphore,
  // because processEventImmediately may temporarily yield it during queue-jumping.
  async runEventHandler(event: BaseEvent, handler: EventHandler, result: EventResult): Promise {
    // NOTE(review): generic type arguments (e.g. on Promise/Set/ReturnType) appear stripped in this extraction — confirm against the repo.
    // skip handlers that were already cancelled before they got a chance to run
    if (result.status === 'error' && result.error instanceof EventHandlerCancelledError) {
      return
    }

    const handler_event = this.getEventProxyScopedToThisBus(event, result)
    const semaphore = this.locks.getSemaphoreForHandler(event, handler)

    if (semaphore) {
      await semaphore.acquire()
    }

    // if the result is already in an error or completed state, release the semaphore immediately and return
    // prevent double-processing of the event by the same handler
    if (result.status === 'error' || result.status === 'completed') {
      if (semaphore) semaphore.release()
      return
    }

    // exit the handler lock if it is already held
    if (result._lock) result._lock.exitHandlerRun()
    // create a new handler lock to track ownership of the semaphore during handler execution
    result._lock = new HandlerLock(semaphore)
    this.locks.enterActiveHandlerContext(result)

    // resolve the effective timeout by combining the event timeout and the handler timeout
    const effective_timeout = this.resolveEffectiveTimeout(event.event_timeout, result.handler.handler_timeout)
    const slow_handler_warning_timer = this.createSlowHandlerWarningTimer(event, result, effective_timeout)

    try {
      // race the handler against its abort signal so cancelEvent() can interrupt the await
      const abort_signal = result.markStarted()
      const handler_result = await Promise.race([this.runHandlerWithTimeout(event, handler, handler_event, result), abort_signal])
      if (event.event_result_schema && handler_result !== undefined) {
        // if there is a result schema to enforce, parse the handler's return value and mark the event as completed or errored if it doesn't match the schema
        const parsed = event.event_result_schema.safeParse(handler_result)
        if (parsed.success) {
          result.markCompleted(parsed.data)
        } else {
          // if the handler's return value doesn't match the schema, mark the event as errored with an error message
          const error = new EventHandlerResultSchemaError(
            `${this.toString()}.on(${event.toString()}, ${result.handler.toString()}) return value ${JSON.stringify(handler_result).slice(0, 20)}... did not match event_result_schema ${event.event_result_type}: ${parsed.error.message}`,
            { event_result: result, cause: parsed.error, raw_value: handler_result }
          )
          result.markError(error)
        }
      } else {
        // if there is no result schema to enforce, just mark the event as completed with the raw handler's return value
        result.markCompleted(handler_result)
      }
    } catch (error) {
      // if the handler timed out, cancel all pending descendants and mark the event as errored
      if (error instanceof EventHandlerTimeoutError) {
        result.markError(error)
        this.cancelPendingDescendants(event, error)
      } else {
        result.markError(error)
      }
    } finally {
      // unwind in reverse order of setup: abort hook, lock ownership, active-handler context, pause request, warning timer
      result._abort = null
      result._lock?.exitHandlerRun()
      this.locks.exitActiveHandlerContext(result)
      this.locks.releaseRunloopPauseForQueueJumpEvent(result)
      if (slow_handler_warning_timer) {
        clearTimeout(slow_handler_warning_timer)
      }
    }
  }

  // run a handler with a timeout, returning a promise that resolves or rejects with the handler's result or an error if the timeout is exceeded
  private async runHandlerWithTimeout(
    event: BaseEvent,
    handler: EventHandler,
    handler_event: BaseEvent = event,
    result: EventResult
  ): Promise {
    // resolve the effective timeout by combining the event timeout and the handler timeout
    const effective_timeout = this.resolveEffectiveTimeout(event.event_timeout, result.handler.handler_timeout)
    // run the handler inside the async context captured at dispatch time (AsyncLocalStorage propagation)
    const run_handler = () =>
      Promise.resolve().then(() => runWithAsyncContext(event._event_dispatch_context ?? null, () => handler.handler(handler_event)))

    if (effective_timeout === null) {
      // if there is no timeout to enforce, just run the handler directly and return the promise
      return run_handler()
    }

    const timeout_seconds = effective_timeout
    const timeout_ms = timeout_seconds * 1000

    const { promise, resolve, reject } = withResolvers()
    let settled = false

    // finalize the promise by clearing the timeout and calling the resolve or reject function
    const finalize = (fn: (value?: unknown) => void) => {
      return (value?: unknown) => {
        // first settle wins; later completions are ignored
        if (settled) {
          return
        }
        settled = true
        clearTimeout(timer)
        fn(value)
      }
    }

    // set a timeout to reject the promise if the handler takes too long
    const timer = setTimeout(() => {
      finalize(reject)(
        new EventHandlerTimeoutError(
          `${this.toString()}.on(${event.toString()}, ${result.handler.toString()}) timed out after ${timeout_seconds}s`,
          {
            event_result: result,
            timeout_seconds,
          }
        )
      )
    }, timeout_ms)

    run_handler().then(finalize(resolve)).catch(finalize(reject))

    return promise
  }

  // arm a one-shot console warning for events still running past event_slow_timeout; returns the timer or null when disabled
  private createSlowEventWarningTimer(event: BaseEvent): ReturnType | null {
    const event_warn_ms = this.event_slow_timeout === null ? null : this.event_slow_timeout * 1000
    if (event_warn_ms === null) {
      return null
    }
    return setTimeout(() => {
      if (event.event_status === 'completed') {
        return
      }
      const running_handler_count = [...event.event_results.values()].filter((result) => result.status === 'started').length
      const started_ts = event.event_started_ts ?? event.event_created_ts ?? performance.now()
      const elapsed_ms = Math.max(0, performance.now() - started_ts)
      const elapsed_seconds = (elapsed_ms / 1000).toFixed(2)
      console.warn(
        `[bubus] Slow event processing: ${this.name}.on(${event.event_type}#${event.event_id.slice(-4)}, ${running_handler_count} handlers) still running after ${elapsed_seconds}s`
      )
    }, event_warn_ms)
  }

  // arm a one-shot console warning for a single handler still running past event_handler_slow_timeout;
  // skipped when the hard timeout would fire first anyway
  private createSlowHandlerWarningTimer(
    event: BaseEvent,
    result: EventResult,
    effective_timeout: number | null
  ): ReturnType | null {
    const warn_ms = this.event_handler_slow_timeout === null ? null : this.event_handler_slow_timeout * 1000
    const should_warn = warn_ms !== null && (effective_timeout === null || effective_timeout * 1000 > warn_ms)
    if (!should_warn || warn_ms === null) {
      return null
    }
    const started_at_ms = performance.now()
    return setTimeout(() => {
      if (result.status !== 'started') {
        return
      }
      const elapsed_ms = performance.now() - started_at_ms
      const elapsed_seconds = (elapsed_ms / 1000).toFixed(1)
      console.warn(
        `[bubus] Slow event handler: ${this.name}.on(${event.toString()}, ${result.handler.toString()}) still running after ${elapsed_seconds}s`
      )
    }, warn_ms)
  }

  // combine event-level and handler-level timeouts: null means "no limit"; when both are set the stricter (smaller) wins
  private resolveEffectiveTimeout(event_timeout: number | null, handler_timeout: number | null): number | null {
    if (handler_timeout === null && event_timeout === null) {
      return null
    }
    if (handler_timeout === null) {
      return event_timeout
    }
    if (event_timeout === null) {
      return handler_timeout
    }
    return Math.min(handler_timeout, event_timeout)
  }

  // check if an event has been processed (and completed) by this bus
  hasProcessedEvent(event: BaseEvent): boolean {
    const results = Array.from(event.event_results.values()).filter((result) => result.eventbus_name === this.name)
    if (results.length === 0) {
      return false
    }
    return results.every((result) => result.status === 'completed' || result.status === 'error')
  }

  // walk up the parent chain and re-evaluate completion for each ancestor once a descendant finishes;
  // the visited set guards against parent-id cycles
  private notifyEventParentsOfCompletion(event: BaseEvent): void {
    const visited = new Set()
    let parent_id = event.event_parent_id
    while (parent_id && !visited.has(parent_id)) {
      visited.add(parent_id)
      const parent = EventBus._all_instances.findEventById(parent_id)
      if (!parent) {
        break
      }
      parent.markCompleted(false)
      // stop climbing as soon as an ancestor is still incomplete
      if (parent.event_status !== 'completed') {
        break
      }
      parent_id = parent.event_parent_id
    }
  }

  // get a proxy wrapper around an Event that will automatically link emitted child events to this bus and handler
  // proxy is what gets passed into the handler, if handler does event.bus.emit(...) to dispatch child events,
  // the proxy auto-sets event.parent_event_id and event.event_emitted_by_handler_id
  getEventProxyScopedToThisBus(event: T, handler_result?: EventResult): T {
    const original_event = event._event_original ?? event
    const bus = this
    const parent_event_id = original_event.event_id
    // proxy over the bus so dispatch/emit/processEventImmediately calls made from inside a handler
    // carry the handler's EventResult and parent linkage automatically
    const bus_proxy = new Proxy(bus, {
      get(target, prop, receiver) {
        if (prop === 'processEventImmediately') {
          return (child_event: BaseEvent) => {
            const runner = Reflect.get(target, prop, receiver) as (event: BaseEvent, handler_result?: EventResult) => Promise
            return runner.call(target, child_event, handler_result)
          }
        }
        if (prop === 'dispatch' || prop === 'emit') {
          return (child_event: BaseEvent, event_key?: EventKey) => {
            const original_child = child_event._event_original ?? child_event
            if (handler_result) {
              handler_result.linkEmittedChildEvent(original_child)
            } else if (!original_child.event_parent_id) {
              // fallback for non-handler scoped dispatch
              original_child.event_parent_id = parent_event_id
            }
            const dispatcher = Reflect.get(target, prop, receiver) as (event: BaseEvent, event_key?: EventKey) => BaseEvent
            const dispatched = dispatcher.call(target, original_child, event_key)
            return target.getEventProxyScopedToThisBus(dispatched, handler_result)
          }
        }
        return Reflect.get(target, prop, receiver)
      },
    })
    // proxy over the event: `bus` reads resolve to the scoped bus proxy, `bus` writes are swallowed,
    // and `_event_original` always exposes the unwrapped event
    const scoped = new Proxy(original_event, {
      get(target, prop, receiver) {
        if (prop === 'bus') {
          return bus_proxy
        }
        if (prop === '_event_original') {
          return target
        }
        return Reflect.get(target, prop, receiver)
      },
      set(target, prop, value) {
        if (prop === 'bus') {
          return true
        }
        return Reflect.set(target, prop, value, target)
      },
      has(target, prop) {
        if (prop === 'bus') {
          return true
        }
        if (prop === '_event_original') {
          return true
        }
        return Reflect.has(target, prop)
      },
    })

    return scoped as T
  }

  // force-abort processing of all pending descendants of an event regardless of whether they have already started
  cancelPendingDescendants(event: BaseEvent, reason: unknown): void {
    const cancellation_cause = this.normalizeCancellationCause(reason)
    const visited = new Set()
    const cancelChildEvent = (child: BaseEvent): void => {
      const original_child = child._event_original ?? child
      if (visited.has(original_child.event_id)) {
        return
      }
      visited.add(original_child.event_id)

      // Depth-first: cancel grandchildren before parent so
      // eventAreAllChildrenComplete() returns true when we get back up.
      for (const grandchild of original_child.event_children) {
        cancelChildEvent(grandchild)
      }

      // cancel on every bus named in the child's event_path
      const path = Array.isArray(original_child.event_path) ? original_child.event_path : []
      const buses_to_cancel = new Set(path)
      for (const bus of EventBus._all_instances) {
        if (!buses_to_cancel.has(bus.name)) {
          continue
        }
        bus.cancelEvent(original_child, cancellation_cause)
      }

      // Force-complete the child event. In JS we can't stop running async
      // handlers, but markCompleted() resolves the done() promise so callers
      // aren't blocked waiting for background work to finish. The background
      // handler's eventual markCompleted/markError is a no-op (terminal guard).
      if (original_child.event_status !== 'completed') {
        original_child.markCompleted()
      }
    }

    for (const child of event.event_children) {
      cancelChildEvent(child)
    }
  }

  // unwrap cancellation/abort wrappers to their underlying cause, and coerce non-Error reasons into Errors
  private normalizeCancellationCause(reason: unknown): Error {
    if (reason instanceof EventHandlerCancelledError || reason instanceof EventHandlerAbortedError) {
      return reason.cause instanceof Error ? reason.cause : reason
    }
    if (reason instanceof EventHandlerTimeoutError) {
      return reason
    }
    return reason instanceof Error ? reason : new Error(String(reason))
  }

  // force-abort processing of an event regardless of whether it is pending or has already started
  private cancelEvent(event: BaseEvent, cause: Error): void {
    const original_event = event._event_original ?? event
    const handler_entries = this.createPendingHandlerResults(original_event)
    let updated = false
    for (const entry of handler_entries) {
      if (entry.result.status === 'pending') {
        const cancelled_error = new EventHandlerCancelledError(`Cancelled pending handler due to parent error: ${cause.message}`, {
          event_result: entry.result,
          cause,
        })
        entry.result.markError(cancelled_error)
        updated = true
      } else if (entry.result.status === 'started') {
        // Abort running handlers. In JS we can't actually stop a running async
        // function, but marking it as error means the event system treats it as
        // done. The background handler will finish silently (its markCompleted/
        // markError call is a no-op once in terminal state).
        //
        // Exit handler-run ownership immediately so any held lock is released.
        // If reacquire is currently pending, exit closes ownership and the
        // reacquire path auto-releases when it wakes.
        entry.result._lock?.exitHandlerRun()

        const aborted_error = new EventHandlerAbortedError(`Aborted running handler due to parent error: ${cause.message}`, {
          event_result: entry.result,
          cause,
        })
        entry.result.markError(aborted_error)
        entry.result.signalAbort(aborted_error)
        updated = true
      }
    }

    // drop every queued copy of this event from the pending queue
    let removed = 0
    if (this.pending_event_queue.length > 0) {
      const before_len = this.pending_event_queue.length
      this.pending_event_queue = this.pending_event_queue.filter(
        (queued) => (queued._event_original ?? queued).event_id !== original_event.event_id
      )
      removed = before_len - this.pending_event_queue.length
    }

    // if we dequeued it before it ever started, this bus no longer counts as pending for it
    if (removed > 0 && !this.in_flight_event_ids.has(original_event.event_id)) {
      original_event.event_pending_bus_count = Math.max(0, original_event.event_pending_bus_count - 1)
    }

    if (updated || removed > 0) {
      original_event.markCompleted(false)
      if (original_event.event_status === 'completed') {
        this.notifyEventParentsOfCompletion(original_event)
      }
    }
  }

  // resolve any find() waiters whose criteria match the given event (called when an event starts processing)
  private notifyFindListeners(event: BaseEvent): void {
    // iterate over a snapshot since matching waiters are removed mid-loop
    for (const waiter of Array.from(this.find_waiters)) {
      if (!this.eventMatchesKey(event, waiter.event_key)) {
        continue
      }
      if (!waiter.matches(event)) {
        continue
      }
      if (waiter.timeout_id) {
        clearTimeout(waiter.timeout_id)
      }
      this.find_waiters.delete(waiter)
      waiter.resolve(event)
    }
  }

  // pair each registered handler with its (existing or freshly created) EventResult for this event
  private createPendingHandlerResults(event: BaseEvent): Array<{
    handler: EventHandler
    result: EventResult
  }> {
    const handlers = this.getHandlersForEvent(event)
    return handlers.map((entry) => {
      const handler_id = entry.id
      const existing = event.event_results.get(handler_id)
      const result = existing ?? new EventResult({ event, handler: entry })
      if (!existing) {
        event.event_results.set(handler_id, result)
      }
      return { handler: entry, result }
    })
  }

  // all handlers registered on this bus that should run for the given event
  getHandlersForEvent(event: BaseEvent): EventHandler[] {
    const handlers: EventHandler[] = []

    // Exact-match handlers first, then wildcard — preserves original ordering
    for (const entry of this.handlers.values()) {
      if (entry.event_key === event.event_type) {
        handlers.push(entry)
      }
    }
    for (const entry of this.handlers.values()) {
      if (entry.event_key === '*') {
        handlers.push(entry)
      }
    }

    return handlers
  }

  // true if the event's type matches the given key (string, '*', or event class)
  private eventMatchesKey(event: BaseEvent, event_key: EventKey): boolean {
    if (event_key === '*') {
      return true
    }
    const normalized = this.normalizeEventKey(event_key)
    if (normalized === '*') {
      return true
    }
    return event.event_type === normalized
  }

  // coerce an EventKey (string, '*', or BaseEvent subclass) to its event_type string; throws on anything else
  private normalizeEventKey(event_key: EventKey | '*'): string | '*' {
    if (event_key === '*') {
      return '*'
    }
    if (typeof event_key === 'string') {
      return event_key
    }
    // accept a BaseEvent subclass by reading its static event_type (the bare 'BaseEvent' base is rejected)
    const event_type = (event_key as { event_type?: unknown }).event_type
    if (typeof event_type === 'string' && event_type.length > 0 && event_type !== 'BaseEvent') {
      return event_type
    }
    throw new Error(
      'bus.on(match_pattern, ...) must be a string event type, "*", or a BaseEvent class, got: ' + JSON.stringify(event_key).slice(0, 30)
    )
  }

  // evict oldest events once event_history exceeds max_history_size (completed events first)
  private trimHistory(): void {
    if (this.max_history_size === null) {
      return
    }
    if (this.event_history.size <= this.max_history_size) {
      return
    }

    let remaining_overage = this.event_history.size - this.max_history_size

    // First pass: remove completed events (oldest first, Map iterates in insertion order)
    for (const [event_id, event] of this.event_history) {
      if (remaining_overage <= 0) {
        break
      }
      if (event.event_status !== 'completed') {
        continue
      }
      this.event_history.delete(event_id)
      event._gc()
      remaining_overage -= 1
    }

    // Second pass: force-remove oldest events regardless of status
    let dropped_pending_events = 0
    if (remaining_overage > 0) {
      for (const [event_id, event] of this.event_history) {
        if (remaining_overage <= 0) {
          break
        }
        if (event.event_status !== 'completed') {
          dropped_pending_events += 1
        }
        this.event_history.delete(event_id)
        event._gc()
        remaining_overage -= 1
      }
      if (dropped_pending_events > 0) {
        console.error(
          `[bubus] ⚠️ Bus ${this.toString()} has exceeded its limit of ${this.max_history_size} inflight events and has started dropping oldest pending events! 
Increase bus.max_history_size or reduce the event volume.` + ) + } + } + } +} diff --git a/bubus-ts/src/event_handler.ts b/bubus-ts/src/event_handler.ts new file mode 100644 index 0000000..a165408 --- /dev/null +++ b/bubus-ts/src/event_handler.ts @@ -0,0 +1,191 @@ +import { v5 as uuidv5 } from 'uuid' + +import type { ConcurrencyMode } from './lock_manager.js' +import type { EventHandlerFunction } from './types.js' +import { BaseEvent } from './base_event.js' +import { EventResult } from './event_result.js' + +const HANDLER_ID_NAMESPACE = uuidv5('bubus-handler', uuidv5.DNS) + +// an entry in the list of event handlers that are registered on a bus +export class EventHandler { + id: string // unique uuidv5 based on hash of bus name, handler name, handler file path:lineno, registered at timestamp, and event key + handler: EventHandlerFunction // the handler function itself + handler_name: string // name of the handler function, or 'anonymous' if the handler is an anonymous/arrow function + handler_file_path?: string // ~/path/to/source/file.ts:123 + handler_timeout: number | null // maximum time in seconds that the handler is allowed to run before it is aborted, defaults to event.event_timeout if not set + event_handler_concurrency?: ConcurrencyMode // per-handler concurrency override + handler_registered_at: string // ISO datetime string version of handler_registered_ts + handler_registered_ts: number // nanosecond monotonic version of handler_registered_at + event_key: string | '*' // event_type string to match against, or '*' to match all events + eventbus_name: string // name of the event bus that the handler is registered on + + constructor(params: { + id?: string + handler: EventHandlerFunction + handler_name: string + handler_file_path?: string + handler_timeout: number | null + event_handler_concurrency?: ConcurrencyMode + handler_registered_at: string + handler_registered_ts: number + event_key: string | '*' + eventbus_name: string + }) { + const 
handler_file_path = EventHandler.detectHandlerFilePath(params.handler_file_path)
    // id is deterministic: reuse the caller-supplied id or derive one from the registration facts
    this.id =
      params.id ??
      EventHandler.computeHandlerId({
        eventbus_name: params.eventbus_name,
        handler_name: params.handler_name,
        handler_file_path,
        handler_registered_at: params.handler_registered_at,
        event_key: params.event_key,
      })
    this.handler = params.handler
    this.handler_name = params.handler_name
    this.handler_file_path = handler_file_path
    this.handler_timeout = params.handler_timeout
    this.event_handler_concurrency = params.event_handler_concurrency
    this.handler_registered_at = params.handler_registered_at
    this.handler_registered_ts = params.handler_registered_ts
    this.event_key = params.event_key
    this.eventbus_name = params.eventbus_name
  }

  // compute globally unique handler uuid as a hash of the bus name, handler name, handler file path, registered at timestamp, and event key
  static computeHandlerId(params: {
    eventbus_name: string
    handler_name: string
    handler_file_path?: string
    handler_registered_at: string
    event_key: string | '*'
  }): string {
    const file_path = EventHandler.detectHandlerFilePath(params.handler_file_path, 'unknown') ?? 'unknown'
    // '|'-joined seed hashed into the fixed namespace so the same registration always yields the same uuid
    const seed = `${params.eventbus_name}|${params.handler_name}|${file_path}|${params.handler_registered_at}|${params.event_key}`
    return uuidv5(seed, HANDLER_ID_NAMESPACE)
  }

  // "someHandlerName() (~/path/to/source/file.ts:123)"
  toString(): string {
    // anonymous/arrow handlers get a short id-suffixed label instead of a name
    const label = this.handler_name && this.handler_name !== 'anonymous' ? `${this.handler_name}()` : `function#${this.id.slice(-4)}()`
    const file_path = this.handler_file_path ??
'unknown'
    return `${label} (${file_path})`
  }

  // walk the stack trace at registration time to detect the location of the source code file that defines the handler function
  // and return the file path and line number as a string, or 'unknown' if the file path cannot be determined
  private static detectHandlerFilePath(file_path?: string, fallback: string = 'unknown'): string | undefined {
    // pull the path out of a stack-frame line; handles "fn (path)", "at path", and Safari-style "fn@path" shapes
    const extract = (value: string): string =>
      value.trim().match(/\(([^)]+)\)$/)?.[1] ??
      value.trim().match(/^\s*at\s+(.+)$/)?.[1] ??
      value.trim().match(/^[^@]+@(.+)$/)?.[1] ??
      value.trim()
    let resolved_path = file_path ? extract(file_path) : file_path
    if (!resolved_path) {
      // no path given: capture a fresh stack and read frame [4]
      // NOTE(review): the fixed frame index assumes a specific call depth from the registration site — verify if call chain changes
      const line = new Error().stack
        ?.split('\n')
        .map((l) => l.trim())
        .filter(Boolean)[4]
      if (line) resolved_path = extract(line)
    }
    if (!resolved_path) return fallback
    // split a trailing ":line" or ":line:col" suffix off the path
    const match = resolved_path.match(/^(.*?):(\d+)(?::\d+)?$/)
    let normalized = match ? match[1] : resolved_path
    const line_number = match?.[2]
    if (normalized.startsWith('file://')) {
      // strip the file:// scheme (and optional localhost authority) down to an absolute path
      let path = normalized.slice('file://'.length)
      if (path.startsWith('localhost/')) path = path.slice('localhost'.length)
      if (!path.startsWith('/')) path = `/${path}`
      try {
        normalized = decodeURIComponent(path)
      } catch {
        // keep the raw path if it contains invalid percent-escapes
        normalized = path
      }
    }
    // anonymize the user home directory (macOS /Users/... and Linux /home/...) as ~/
    normalized = normalized.replace(/\/users\/[^/]+\//i, '~/').replace(/\/home\/[^/]+\//i, '~/')
    return line_number ?
`${normalized}:${line_number}` : normalized + } +} + +// Generic base TimeoutError used for EventHandlerTimeoutError.cause default value if +export class TimeoutError extends Error { + constructor(message: string) { + super(message) + this.name = 'TimeoutError' + } +} + +// Base class for all errors that can occur while running an event handler +export class EventHandlerError extends Error { + event_result: EventResult + timeout_seconds: number | null + cause: Error + + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { + super(message) + this.name = 'EventHandlerError' + this.event_result = params.event_result + this.cause = params.cause + this.timeout_seconds = params.timeout_seconds ?? this.event_result.event.event_timeout ?? null + } + + get event(): BaseEvent { + return this.event_result.event + } + + get event_type(): string { + return this.event.event_type + } + + get handler_name(): string { + return this.event_result.handler_name + } + + get handler_id(): string { + return this.event_result.handler_id + } + + get event_timeout(): number | null { + return this.event.event_timeout + } +} + +// When the handler itself timed out while executing (due to handler.handler_timeout being exceeded) +export class EventHandlerTimeoutError extends EventHandlerError { + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause?: Error }) { + super(message, { + event_result: params.event_result, + timeout_seconds: params.timeout_seconds, + cause: params.cause ?? new TimeoutError(message), + }) + this.name = 'EventHandlerTimeoutError' + } +} + +// When a pending handler was cancelled and never run due to an error (e.g. 
timeout) in a parent scope +export class EventHandlerCancelledError extends EventHandlerError { + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { + super(message, params) + this.name = 'EventHandlerCancelledError' + } +} + +// When a handler that was already running was aborted due to an error in the parent scope, not due to an error in its own logic / exceeding its own timeout +export class EventHandlerAbortedError extends EventHandlerError { + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { + super(message, params) + this.name = 'EventHandlerAbortedError' + } +} + +// When a handler run succesfully but returned a value that failed event_result_schema validation +export class EventHandlerResultSchemaError extends EventHandlerError { + raw_value: unknown + + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error; raw_value: unknown }) { + super(message, params) + this.name = 'EventHandlerResultSchemaError' + this.raw_value = params.raw_value + } +} diff --git a/bubus-ts/src/event_result.ts b/bubus-ts/src/event_result.ts new file mode 100644 index 0000000..5d6ef20 --- /dev/null +++ b/bubus-ts/src/event_result.ts @@ -0,0 +1,252 @@ +import { v7 as uuidv7 } from 'uuid' + +import { BaseEvent } from './base_event.js' +import type { EventHandler } from './event_handler.js' +import { HandlerLock, type ConcurrencyMode, withResolvers } from './lock_manager.js' +import type { Deferred } from './lock_manager.js' +import type { EventHandlerFunction, EventResultType } from './types.js' + +// More precise than event.event_status, includes separate 'error' state for handlers that throw errors during execution +export type EventResultStatus = 'pending' | 'started' | 'completed' | 'error' + +export type EventResultData = { + id?: string + status?: EventResultStatus + event_id?: string + 
handler?: { + id?: string + handler_name?: string + handler_file_path?: string + handler_timeout?: number | null + event_handler_concurrency?: ConcurrencyMode + handler_registered_at?: string + handler_registered_ts?: number + event_key?: string | '*' + eventbus_name?: string + } + started_at?: string + started_ts?: number + completed_at?: string + completed_ts?: number + result?: unknown + error?: unknown + event_children?: string[] +} + +// Object that tracks the pending or completed execution of a single event handler +export class EventResult { + id: string // unique uuidv7 identifier for the event result + status: EventResultStatus // 'pending', 'started', 'completed', or 'error' + event: TEvent // the Event that the handler is processing + handler: EventHandler // the EventHandler object that going to process the event + started_at?: string // ISO datetime string version of started_ts + started_ts?: number // nanosecond monotonic version of started_at + completed_at?: string // ISO datetime string version of completed_ts + completed_ts?: number // nanosecond monotonic version of completed_at + result?: EventResultType // parsed return value from the event handler + error?: unknown // error object thrown by the event handler, or null if the handler completed successfully + event_children: BaseEvent[] // any child events that were emitted during handler execution are captured automatically and stored here to track hierarchy + + // Abort signal: created when handler starts, rejected by signalAbort() to + // interrupt runEventHandler's await via Promise.race. + _abort: Deferred | null + // Handler lock: tracks ownership of the handler concurrency semaphore + // during handler execution. Set by EventBus.runEventHandler, used by + // processEventImmediately for yield-and-reacquire during queue-jumps. 
+ _lock: HandlerLock | null + + constructor(params: { event: TEvent; handler: EventHandler }) { + this.id = uuidv7() + this.status = 'pending' + this.event = params.event + this.handler = params.handler + this.event_children = [] + this.result = undefined + this.error = undefined + this._abort = null + this._lock = null + } + + toString(): string { + return `${this.result ?? 'null'} (${this.status})` + } + + get event_id(): string { + return this.event.event_id + } + + get handler_id(): string { + return this.handler.id + } + + get handler_name(): string { + return this.handler.handler_name + } + + get handler_file_path(): string | undefined { + return this.handler.handler_file_path + } + + get handler_timeout(): number | null { + return this.handler.handler_timeout + } + + get eventbus_name(): string { + return this.handler.eventbus_name + } + + // shortcut for the result value so users can do event_result.value instead of event_result.result + get value(): EventResultType | undefined { + return this.result + } + + // Link a child event emitted by this handler run to the parent event/result. + linkEmittedChildEvent(child_event: BaseEvent): void { + const original_child = child_event._event_original ?? child_event + const parent_event = this.event._event_original ?? 
this.event + if (!original_child.event_parent_id) { + original_child.event_parent_id = parent_event.event_id + } + if (!original_child.event_emitted_by_handler_id) { + original_child.event_emitted_by_handler_id = this.handler_id + } + if (!this.event_children.some((child) => child.event_id === original_child.event_id)) { + this.event_children.push(original_child) + } + } + + // Get the raw return value from the handler, even if it threw an error / failed validation + get raw_value(): EventResultType | undefined { + if (this.error && (this.error as any).raw_value !== undefined) { + return (this.error as any).raw_value + } + return this.result + } + + // Reject the abort promise, causing runEventHandler's Promise.race to + // throw immediately β€” even if the handler has no timeout. + signalAbort(error: Error): void { + if (this._abort) { + this._abort.reject(error) + this._abort = null + } + } + + // Mark started and return the abort promise for Promise.race. + markStarted(): Promise { + if (!this._abort) { + this._abort = withResolvers() + } + if (this.status === 'pending') { + this.status = 'started' + const { isostring: started_at, ts: started_ts } = BaseEvent.nextTimestamp() + this.started_at = started_at + this.started_ts = started_ts + } + return this._abort.promise + } + + markCompleted(result: EventResultType | undefined): void { + if (this.status === 'completed' || this.status === 'error') return + this.status = 'completed' + this.result = result + const { isostring: completed_at, ts: completed_ts } = BaseEvent.nextTimestamp() + this.completed_at = completed_at + this.completed_ts = completed_ts + } + + markError(error: unknown): void { + if (this.status === 'completed' || this.status === 'error') return + this.status = 'error' + this.error = error + const { isostring: completed_at, ts: completed_ts } = BaseEvent.nextTimestamp() + this.completed_at = completed_at + this.completed_ts = completed_ts + } + + toJSON(): EventResultData { + return { + id: 
this.id, + status: this.status, + event_id: this.event.event_id, + handler: { + id: this.handler.id, + handler_name: this.handler.handler_name, + handler_file_path: this.handler.handler_file_path, + handler_timeout: this.handler.handler_timeout, + event_handler_concurrency: this.handler.event_handler_concurrency, + handler_registered_at: this.handler.handler_registered_at, + handler_registered_ts: this.handler.handler_registered_ts, + event_key: this.handler.event_key, + eventbus_name: this.handler.eventbus_name, + }, + started_at: this.started_at, + started_ts: this.started_ts, + completed_at: this.completed_at, + completed_ts: this.completed_ts, + result: this.result, + error: this.error, + event_children: this.event_children.map((child) => child.event_id), + } + } + + static fromJSON(event: TEvent, data: unknown): EventResult | null { + if (!data || typeof data !== 'object') { + return null + } + const record = data as EventResultData + const handler_record = record.handler ?? {} + + const handler_stub = { + id: typeof handler_record.id === 'string' ? handler_record.id : `deserialized_handler_${uuidv7()}`, + handler: (() => undefined) as EventHandlerFunction, + handler_name: typeof handler_record.handler_name === 'string' ? handler_record.handler_name : 'deserialized_handler', + handler_file_path: typeof handler_record.handler_file_path === 'string' ? handler_record.handler_file_path : undefined, + handler_timeout: + typeof handler_record.handler_timeout === 'number' || handler_record.handler_timeout === null + ? handler_record.handler_timeout + : null, + event_handler_concurrency: handler_record.event_handler_concurrency, + handler_registered_at: + typeof handler_record.handler_registered_at === 'string' ? handler_record.handler_registered_at : event.event_created_at, + handler_registered_ts: + typeof handler_record.handler_registered_ts === 'number' ? 
handler_record.handler_registered_ts : event.event_created_ts, + event_key: + handler_record.event_key === '*' || typeof handler_record.event_key === 'string' ? handler_record.event_key : event.event_type, + eventbus_name: typeof handler_record.eventbus_name === 'string' ? handler_record.eventbus_name : (event.bus?.name ?? 'unknown'), + toString: () => { + const name = typeof handler_record.handler_name === 'string' ? handler_record.handler_name : 'deserialized_handler' + const file = typeof handler_record.handler_file_path === 'string' ? handler_record.handler_file_path : 'unknown' + return `${name}() (${file})` + }, + } as unknown as EventHandler + + const result = new EventResult({ event, handler: handler_stub }) + if (typeof record.id === 'string') { + result.id = record.id + } + if (record.status === 'pending' || record.status === 'started' || record.status === 'completed' || record.status === 'error') { + result.status = record.status + } + if (typeof record.started_at === 'string') { + result.started_at = record.started_at + } + if (typeof record.started_ts === 'number') { + result.started_ts = record.started_ts + } + if (typeof record.completed_at === 'string') { + result.completed_at = record.completed_at + } + if (typeof record.completed_ts === 'number') { + result.completed_ts = record.completed_ts + } + if ('result' in record) { + result.result = record.result as EventResultType + } + if ('error' in record) { + result.error = record.error + } + result.event_children = [] + return result + } +} diff --git a/bubus-ts/src/index.ts b/bubus-ts/src/index.ts new file mode 100644 index 0000000..ed57151 --- /dev/null +++ b/bubus-ts/src/index.ts @@ -0,0 +1,13 @@ +export { BaseEvent, BaseEventSchema } from './base_event.js' +export { EventResult } from './event_result.js' +export { EventBus } from './event_bus.js' +export { + EventHandlerTimeoutError, + EventHandlerCancelledError, + EventHandlerAbortedError, + EventHandlerResultSchemaError, +} from 
'./event_handler.js' +export type { ConcurrencyMode, EventBusInterfaceForLockManager } from './lock_manager.js' +export type { EventClass, EventHandlerFunction as EventHandler, EventKey, EventStatus, FindOptions, FindWindow } from './types.js' +export { retry, clearSemaphoreRegistry, RetryTimeoutError, SemaphoreTimeoutError } from './retry.js' +export type { RetryOptions } from './retry.js' diff --git a/bubus-ts/src/lock_manager.ts b/bubus-ts/src/lock_manager.ts new file mode 100644 index 0000000..d814368 --- /dev/null +++ b/bubus-ts/src/lock_manager.ts @@ -0,0 +1,377 @@ +import type { BaseEvent } from './base_event.js' +import type { EventHandler } from './event_handler.js' +import type { EventResult } from './event_result.js' + +// ─── Deferred / withResolvers ──────────────────────────────────────────────── + +export type Deferred = { + promise: Promise + resolve: (value: T | PromiseLike) => void + reject: (reason?: unknown) => void +} + +export const withResolvers = (): Deferred => { + if (typeof Promise.withResolvers === 'function') { + return Promise.withResolvers() + } + let resolve!: (value: T | PromiseLike) => void + let reject!: (reason?: unknown) => void + const promise = new Promise((resolve_fn, reject_fn) => { + resolve = resolve_fn + reject = reject_fn + }) + return { promise, resolve, reject } +} + +// ─── Concurrency modes ────────────────────────────────────────────────────── + +export const CONCURRENCY_MODES = ['global-serial', 'bus-serial', 'parallel', 'auto'] as const +export type ConcurrencyMode = (typeof CONCURRENCY_MODES)[number] // union type of the values in the CONCURRENCY_MODES array +export const DEFAULT_CONCURRENCY_MODE = 'bus-serial' + +export const resolveConcurrencyMode = (mode: ConcurrencyMode | undefined, fallback: ConcurrencyMode): ConcurrencyMode => { + const normalized_fallback = fallback === 'auto' ? 
DEFAULT_CONCURRENCY_MODE : fallback + if (!mode || mode === 'auto') { + return normalized_fallback + } + return mode +} + +// ─── AsyncSemaphore ────────────────────────────────────────────────────────── + +export class AsyncSemaphore { + size: number + in_use: number + waiters: Array<() => void> + + constructor(size: number) { + this.size = size + this.in_use = 0 + this.waiters = [] + } + + async acquire(): Promise { + if (this.size === Infinity) { + return + } + if (this.in_use < this.size) { + this.in_use += 1 + return + } + await new Promise((resolve) => { + this.waiters.push(resolve) + }) + this.in_use += 1 + } + + release(): void { + if (this.size === Infinity) { + return + } + this.in_use = Math.max(0, this.in_use - 1) + const next = this.waiters.shift() + if (next) { + next() + } + } +} + +export const semaphoreForMode = ( + mode: ConcurrencyMode, + global_semaphore: AsyncSemaphore, + bus_semaphore: AsyncSemaphore +): AsyncSemaphore | null => { + if (mode === 'parallel') { + return null + } + if (mode === 'global-serial') { + return global_semaphore + } + if (mode === 'bus-serial') { + return bus_semaphore + } + return bus_semaphore +} + +export const runWithSemaphore = async (semaphore: AsyncSemaphore | null, fn: () => Promise): Promise => { + if (!semaphore) { + return await fn() + } + await semaphore.acquire() + try { + return await fn() + } finally { + semaphore.release() + } +} + +// ─── HandlerLock ───────────────────────────────────────────────────────────── + +export type HandlerExecutionState = 'held' | 'yielded' | 'closed' + +// Tracks a single handler execution's ownership of a semaphore lock. +// Reacquire is race-safe: if the handler exits while waiting to reclaim, +// the reclaimed lock is immediately released to avoid leaks. 
+export class HandlerLock { + private semaphore: AsyncSemaphore | null + private state: HandlerExecutionState + + constructor(semaphore: AsyncSemaphore | null) { + this.semaphore = semaphore + this.state = 'held' + } + + // used by EventBus.processEventImmediately to yield the parent handler's lock to the child event so it can be processed immediately + yieldHandlerLockForChildRun(): boolean { + if (!this.semaphore || this.state !== 'held') { + return false + } + this.state = 'yielded' + this.semaphore.release() + return true + } + + // used by EventBus.processEventImmediately to reacquire the handler lock after the child event has been processed + async reclaimHandlerLockIfRunning(): Promise { + if (!this.semaphore || this.state !== 'yielded') { + return false + } + await this.semaphore.acquire() + if (this.state !== 'yielded') { + // Handler exited while this reacquire was pending. + this.semaphore.release() + return false + } + this.state = 'held' + return true + } + + // used by EventBus.runEventHandler to exit the handler lock after the handler has finished executing + exitHandlerRun(): void { + if (this.state === 'closed') { + return + } + const should_release = !!this.semaphore && this.state === 'held' + this.state = 'closed' + if (should_release) { + this.semaphore!.release() + } + } + + // used by EventBus.processEventImmediately to yield the handler lock and reacquire it after the child event has been processed + async runQueueJump(fn: () => Promise): Promise { + const yielded = this.yieldHandlerLockForChildRun() + try { + return await fn() + } finally { + if (yielded) { + await this.reclaimHandlerLockIfRunning() + } + } + } +} + +// ─── LockManager ───────────────────────────────────────────────────────────── + +// Interface that must be implemented by the EventBus class to be used by the LockManager +export type EventBusInterfaceForLockManager = { + isIdleAndQueueEmpty: () => boolean + event_concurrency_default: ConcurrencyMode + 
event_handler_concurrency_default: ConcurrencyMode +} + +// The LockManager is responsible for managing the concurrency of events and handlers +export class LockManager { + static global_event_semaphore = new AsyncSemaphore(1) // used for the global-serial concurrency mode + static global_handler_semaphore = new AsyncSemaphore(1) // used for the global-serial concurrency mode + + private bus: EventBusInterfaceForLockManager // Live bus reference; used to read defaults and idle state. + readonly bus_event_semaphore: AsyncSemaphore // Per-bus event semaphore; created with LockManager and never swapped. + readonly bus_handler_semaphore: AsyncSemaphore // Per-bus handler semaphore; created with LockManager and never swapped. + + private pause_depth: number // Re-entrant pause counter; increments on requestPause, decrements on release. + private pause_waiters: Array<() => void> // Resolvers for waitUntilRunloopResumed; drained when pause_depth hits 0. + private queue_jump_pause_releases: WeakMap void> // Per-handler pause release for queue-jump; cleared on handler exit. + private active_handler_results: EventResult[] // Stack of active handler results for "inside handler" detection. + + private idle_waiters: Array<() => void> // Resolvers waiting for stable idle; cleared when idle confirmed. + private idle_check_pending: boolean // Debounce flag to avoid scheduling redundant idle checks. + private idle_check_streak: number // Counts consecutive idle checks; used to require two ticks of idle. 
+ + constructor(bus: EventBusInterfaceForLockManager) { + this.bus = bus + this.bus_event_semaphore = new AsyncSemaphore(1) // used for the bus-serial concurrency mode + this.bus_handler_semaphore = new AsyncSemaphore(1) // used for the bus-serial concurrency mode + + this.pause_depth = 0 + this.pause_waiters = [] + this.queue_jump_pause_releases = new WeakMap() + this.active_handler_results = [] + + this.idle_waiters = [] + this.idle_check_pending = false + this.idle_check_streak = 0 + } + + // Low-level runloop pause: increments a re-entrant counter and returns a release + // function. Used for broad, bus-scoped pauses (e.g. runImmediatelyAcrossBuses). + requestPause(): () => void { + this.pause_depth += 1 + let released = false + return () => { + if (released) { + return + } + released = true + this.pause_depth = Math.max(0, this.pause_depth - 1) + if (this.pause_depth !== 0) { + return + } + const waiters = this.pause_waiters + this.pause_waiters = [] + for (const resolve of waiters) { + resolve() + } + } + } + + waitUntilRunloopResumed(): Promise { + if (this.pause_depth === 0) { + return Promise.resolve() + } + return new Promise((resolve) => { + this.pause_waiters.push(resolve) + }) + } + + isPaused(): boolean { + return this.pause_depth > 0 + } + + enterActiveHandlerContext(result: EventResult): void { + this.active_handler_results.push(result) + } + + exitActiveHandlerContext(result: EventResult): void { + const idx = this.active_handler_results.indexOf(result) + if (idx >= 0) { + this.active_handler_results.splice(idx, 1) + } + } + + getActiveHandlerResult(): EventResult | undefined { + return this.active_handler_results[this.active_handler_results.length - 1] + } + + // Per-bus check: true only if this specific bus has a handler on its stack. 
// For cross-bus queue-jumping, EventBus.processEventImmediately uses getParentEventResultAcrossAllBusses()
  // to walk up the parent event tree, and the bus proxy passes handler_result
  // to processEventImmediately so it can yield/reacquire the correct semaphore.
  isAnyHandlerActive(): boolean {
    return this.active_handler_results.length > 0
  }

  // Queue-jump pause: wraps requestPause with per-handler deduping so repeated
  // calls during the same handler run don't stack pauses. Released via
  // releaseRunloopPauseForQueueJumpEvent when the handler finishes.
  requestRunloopPauseForQueueJumpEvent(result: EventResult): void {
    // already paused for this handler run — don't stack another pause
    if (this.queue_jump_pause_releases.has(result)) {
      return
    }
    this.queue_jump_pause_releases.set(result, this.requestPause())
  }

  // release the event bus runloop pause for a given event result if there is a pause request for it
  // i.e. if it was a queue-jump event that was processed immediately, notify the runloop to resume
  releaseRunloopPauseForQueueJumpEvent(result: EventResult): void {
    const release_pause = this.queue_jump_pause_releases.get(result)
    if (!release_pause) {
      return
    }
    this.queue_jump_pause_releases.delete(result)
    release_pause()
  }

  // Resolve once the bus reports a stable idle state; fast-path resolves
  // immediately when the bus is already idle with an empty queue.
  waitForIdle(): Promise<void> {
    if (this.bus.isIdleAndQueueEmpty()) {
      return Promise.resolve()
    }
    return new Promise((resolve) => {
      this.idle_waiters.push(resolve)
      this.scheduleIdleCheck()
    })
  }

  // Called by EventBus.markEventCompleted and EventBus.markHandlerCompleted to notify
  // waitUntilIdle() callers that the bus may now be idle.
  notifyIdleListeners(): void {
    // Fast-path: most completions have no waitUntilIdle() callers waiting,
    // so skip expensive idle snapshot scans in that common case.
+ if (this.idle_waiters.length === 0) { + this.idle_check_streak = 0 + return + } + + if (!this.bus.isIdleAndQueueEmpty()) { + this.idle_check_streak = 0 + if (this.idle_waiters.length > 0) { + this.scheduleIdleCheck() + } + return + } + + this.idle_check_streak += 1 + if (this.idle_check_streak < 2) { + if (this.idle_waiters.length > 0) { + this.scheduleIdleCheck() + } + return + } + + this.idle_check_streak = 0 + const waiters = this.idle_waiters + this.idle_waiters = [] + for (const resolve of waiters) { + resolve() + } + } + + getSemaphoreForEvent(event: BaseEvent): AsyncSemaphore | null { + const resolved = resolveConcurrencyMode(event.event_concurrency, this.bus.event_concurrency_default) + return semaphoreForMode(resolved, LockManager.global_event_semaphore, this.bus_event_semaphore) + } + + getSemaphoreForHandler(event: BaseEvent, handler?: Pick): AsyncSemaphore | null { + const event_override = + event.event_handler_concurrency && event.event_handler_concurrency !== 'auto' ? event.event_handler_concurrency : undefined + const handler_override = + handler?.event_handler_concurrency && handler.event_handler_concurrency !== 'auto' ? handler.event_handler_concurrency : undefined + const fallback = this.bus.event_handler_concurrency_default + const resolved = resolveConcurrencyMode(event_override ?? handler_override ?? fallback, fallback) + return semaphoreForMode(resolved, LockManager.global_handler_semaphore, this.bus_handler_semaphore) + } + + // Schedules a debounced idle check to run after a short delay. Used to gate + // waitUntilIdle() calls during handler execution and after event completion. 
+ private scheduleIdleCheck(): void { + if (this.idle_check_pending) { + return + } + this.idle_check_pending = true + setTimeout(() => { + this.idle_check_pending = false + this.notifyIdleListeners() + }, 0) + } + + // Reset all state to initial values + clear(): void { + this.pause_depth = 0 + this.pause_waiters = [] + this.queue_jump_pause_releases = new WeakMap() + this.active_handler_results = [] + this.idle_waiters = [] + this.idle_check_pending = false + this.idle_check_streak = 0 + } +} diff --git a/bubus-ts/src/logging.ts b/bubus-ts/src/logging.ts new file mode 100644 index 0000000..8d242e7 --- /dev/null +++ b/bubus-ts/src/logging.ts @@ -0,0 +1,242 @@ +import { BaseEvent } from './base_event.js' +import { EventResult } from './event_result.js' +import { EventHandlerCancelledError, EventHandlerTimeoutError } from './event_handler.js' + +type LogTreeBus = { + name: string + event_history: Map +} + +export const logTree = (bus: LogTreeBus): string => { + const parent_to_children = new Map() + + const add_child = (parent_id: string, child: BaseEvent): void => { + const existing = parent_to_children.get(parent_id) ?? 
[] + existing.push(child) + parent_to_children.set(parent_id, existing) + } + + const root_events: BaseEvent[] = [] + const seen = new Set() + + for (const event of bus.event_history.values()) { + const parent_id = event.event_parent_id + if (!parent_id || parent_id === event.event_id || !bus.event_history.has(parent_id)) { + if (!seen.has(event.event_id)) { + root_events.push(event) + seen.add(event.event_id) + } + } + } + + if (root_events.length === 0) { + return '(No events in history)' + } + + const nodes_by_id = new Map() + for (const root of root_events) { + nodes_by_id.set(root.event_id, root) + for (const descendant of root.event_descendants) { + nodes_by_id.set(descendant.event_id, descendant) + } + } + + for (const node of nodes_by_id.values()) { + const parent_id = node.event_parent_id + if (!parent_id || parent_id === node.event_id) { + continue + } + if (!nodes_by_id.has(parent_id)) { + continue + } + add_child(parent_id, node) + } + + for (const children of parent_to_children.values()) { + children.sort((a, b) => (a.event_created_at < b.event_created_at ? -1 : a.event_created_at > b.event_created_at ? 1 : 0)) + } + + const lines: string[] = [] + lines.push(`πŸ“Š Event History Tree for ${bus.name}`) + lines.push('='.repeat(80)) + + root_events.sort((a, b) => (a.event_created_at < b.event_created_at ? -1 : a.event_created_at > b.event_created_at ? 1 : 0)) + const visited = new Set() + root_events.forEach((event, index) => { + lines.push(buildTreeLine(event, '', index === root_events.length - 1, parent_to_children, visited)) + }) + + lines.push('='.repeat(80)) + + return lines.join('\n') +} + +export const buildTreeLine = ( + event: BaseEvent, + indent: string, + is_last: boolean, + parent_to_children: Map, + visited: Set +): string => { + const connector = is_last ? '└── ' : 'β”œβ”€β”€ ' + const status_icon = event.event_status === 'completed' ? 'βœ…' : event.event_status === 'started' ? 
'πŸƒ' : '⏳' + + const created_at = formatTimestamp(event.event_created_at) + let timing = `[${created_at}` + if (event.event_completed_at) { + const created_ms = Date.parse(event.event_created_at) + const completed_ms = Date.parse(event.event_completed_at) + if (!Number.isNaN(created_ms) && !Number.isNaN(completed_ms)) { + const duration = (completed_ms - created_ms) / 1000 + timing += ` (${duration.toFixed(3)}s)` + } + } + timing += ']' + + const line = `${indent}${connector}${status_icon} ${event.event_type}#${event.event_id.slice(-4)} ${timing}` + + if (visited.has(event.event_id)) { + return line + } + visited.add(event.event_id) + + const extension = is_last ? ' ' : 'β”‚ ' + const new_indent = indent + extension + + const result_items: Array<{ type: 'result'; result: EventResult } | { type: 'child'; child: BaseEvent }> = [] + for (const result of event.event_results.values()) { + result_items.push({ type: 'result', result }) + } + const children = parent_to_children.get(event.event_id) ?? [] + const printed_child_ids = new Set(event.event_results.size > 0 ? 
event.event_results.keys() : []) + for (const child of children) { + if (!printed_child_ids.has(child.event_id) && !child.event_emitted_by_handler_id) { + result_items.push({ type: 'child', child }) + printed_child_ids.add(child.event_id) + } + } + + if (result_items.length === 0) { + return line + } + + const child_lines: string[] = [] + result_items.forEach((item, index) => { + const is_last_item = index === result_items.length - 1 + if (item.type === 'result') { + child_lines.push(buildResultLine(item.result, new_indent, is_last_item, parent_to_children, visited)) + } else { + child_lines.push(buildTreeLine(item.child, new_indent, is_last_item, parent_to_children, visited)) + } + }) + + return [line, ...child_lines].join('\n') +} + +export const buildResultLine = ( + result: EventResult, + indent: string, + is_last: boolean, + parent_to_children: Map, + visited: Set +): string => { + const connector = is_last ? '└── ' : 'β”œβ”€β”€ ' + const status_icon = result.status === 'completed' ? 'βœ…' : result.status === 'error' ? '❌' : result.status === 'started' ? 'πŸƒ' : '⏳' + + const handler_label = + result.handler_name && result.handler_name !== 'anonymous' + ? result.handler_name + : result.handler_file_path + ? 
result.handler_file_path + : 'anonymous' + const handler_display = `${result.eventbus_name}.${handler_label}#${result.handler_id.slice(-4)}` + let line = `${indent}${connector}${status_icon} ${handler_display}` + + if (result.started_at) { + line += ` [${formatTimestamp(result.started_at)}` + if (result.completed_at) { + const started_ms = Date.parse(result.started_at) + const completed_ms = Date.parse(result.completed_at) + if (!Number.isNaN(started_ms) && !Number.isNaN(completed_ms)) { + const duration = (completed_ms - started_ms) / 1000 + line += ` (${duration.toFixed(3)}s)` + } + } + line += ']' + } + + if (result.status === 'error' && result.error) { + if (result.error instanceof EventHandlerTimeoutError) { + line += ` ⏱️ Timeout: ${result.error.message}` + } else if (result.error instanceof EventHandlerCancelledError) { + line += ` 🚫 Cancelled: ${result.error.message}` + } else { + const error_name = result.error instanceof Error ? result.error.name : 'Error' + const error_message = result.error instanceof Error ? result.error.message : String(result.error) + line += ` ☠️ ${error_name}: ${error_message}` + } + } else if (result.status === 'completed') { + line += ` β†’ ${formatResultValue(result.result)}` + } + + const extension = is_last ? ' ' : 'β”‚ ' + const new_indent = indent + extension + + if (result.event_children.length === 0) { + return line + } + + const child_lines: string[] = [] + const direct_children = result.event_children + const parent_children = parent_to_children.get(result.event_id) ?? 
[] + const emitted_children = parent_children.filter((child) => child.event_emitted_by_handler_id === result.handler_id) + const children_by_id = new Map() + direct_children.forEach((child) => { + children_by_id.set(child.event_id, child) + }) + emitted_children.forEach((child) => { + if (!children_by_id.has(child.event_id)) { + children_by_id.set(child.event_id, child) + } + }) + const children_to_print = Array.from(children_by_id.values()).filter((child) => !visited.has(child.event_id)) + + children_to_print.forEach((child, index) => { + child_lines.push(buildTreeLine(child, new_indent, index === children_to_print.length - 1, parent_to_children, visited)) + }) + + return [line, ...child_lines].join('\n') +} + +export const formatTimestamp = (value?: string): string => { + if (!value) { + return 'N/A' + } + const date = new Date(value) + if (Number.isNaN(date.getTime())) { + return 'N/A' + } + return date.toISOString().slice(11, 23) +} + +export const formatResultValue = (value: unknown): string => { + if (value === null || value === undefined) { + return 'None' + } + if (value instanceof BaseEvent) { + return `Event(${value.event_type}#${value.event_id.slice(-4)})` + } + if (typeof value === 'string') { + return JSON.stringify(value) + } + if (typeof value === 'number' || typeof value === 'boolean') { + return String(value) + } + if (Array.isArray(value)) { + return `list(${value.length} items)` + } + if (typeof value === 'object') { + return `dict(${Object.keys(value as Record).length} items)` + } + return `${typeof value}(...)` +} diff --git a/bubus-ts/src/retry.ts b/bubus-ts/src/retry.ts new file mode 100644 index 0000000..8ef1542 --- /dev/null +++ b/bubus-ts/src/retry.ts @@ -0,0 +1,346 @@ +import { AsyncSemaphore } from './lock_manager.js' +import { createAsyncLocalStorage, type AsyncLocalStorageLike } from './async_context.js' + +// ─── Types ─────────────────────────────────────────────────────────────────── + +export interface RetryOptions { + /** Total 
number of attempts including the initial call (1 = no retry, 3 = up to 2 retries). Default: 1 */ + max_attempts?: number + + /** Seconds to wait between retries. Default: 0 */ + retry_after?: number + + /** Multiplier applied to retry_after after each attempt for exponential backoff. Default: 1.0 (constant delay) */ + retry_backoff_factor?: number + + /** Only retry when the thrown error matches one of these matchers. Accepts error class constructors, + * string error names (matched against error.name), or RegExp patterns (tested against String(error)). + * Default: undefined (retry on any error) */ + retry_on_errors?: Array<(new (...args: any[]) => Error) | string | RegExp> + + /** Per-attempt timeout in seconds. Default: undefined (no per-attempt timeout) */ + timeout?: number | null + + /** Maximum concurrent executions sharing this semaphore. Default: undefined (no concurrency limit) */ + semaphore_limit?: number | null + + /** Semaphore identifier. Functions with the same name share the same concurrency slot pool. Default: function name */ + semaphore_name?: string | null + + /** If true, proceed without concurrency limit when semaphore acquisition times out. Default: true */ + semaphore_lax?: boolean + + /** Semaphore scoping strategy. Default: 'global' + * - 'global': all calls share one semaphore (keyed by semaphore_name) + * - 'class': all instances of the same class share one semaphore (keyed by className.semaphore_name) + * - 'instance': each object instance gets its own semaphore (keyed by instanceId.semaphore_name) + * 'class' and 'instance' require `this` to be an object; they fall back to 'global' for standalone calls. */ + semaphore_scope?: 'global' | 'class' | 'instance' + + /** Maximum seconds to wait for semaphore acquisition. 
Default: undefined β†’ timeout * max(1, limit - 1) */ + semaphore_timeout?: number | null +} + +// ─── Errors ────────────────────────────────────────────────────────────────── + +/** Thrown when a single attempt exceeds the per-attempt timeout. */ +export class RetryTimeoutError extends Error { + timeout_seconds: number + attempt: number + + constructor(message: string, params: { timeout_seconds: number; attempt: number }) { + super(message) + this.name = 'RetryTimeoutError' + this.timeout_seconds = params.timeout_seconds + this.attempt = params.attempt + } +} + +/** Thrown (when semaphore_lax=false) if the semaphore cannot be acquired within the timeout. */ +export class SemaphoreTimeoutError extends Error { + semaphore_name: string + semaphore_limit: number + timeout_seconds: number + + constructor(message: string, params: { semaphore_name: string; semaphore_limit: number; timeout_seconds: number }) { + super(message) + this.name = 'SemaphoreTimeoutError' + this.semaphore_name = params.semaphore_name + this.semaphore_limit = params.semaphore_limit + this.timeout_seconds = params.timeout_seconds + } +} + +// ─── Re-entrancy tracking via AsyncLocalStorage ────────────────────────────── +// +// Prevents deadlocks when a retry()-wrapped function calls another retry()-wrapped +// function that shares the same semaphore (or calls itself recursively). +// +// Each async call stack tracks which semaphore names it currently holds. When a +// nested call encounters a semaphore it already holds, it skips acquisition and +// runs directly within the parent's slot. +// +// Uses the same AsyncLocalStorage polyfill as the rest of bubus (see async_context.ts) +// so it works in Node.js and gracefully degrades to a no-op in browsers. + +type ReentrantStore = Set + +// Separate AsyncLocalStorage instance for retry re-entrancy tracking. +// Created via the shared factory in async_context.ts (returns null in browsers). 
+const retry_context_storage: AsyncLocalStorageLike | null = createAsyncLocalStorage() + +function getHeldSemaphores(): ReentrantStore { + return (retry_context_storage?.getStore() as ReentrantStore | undefined) ?? new Set() +} + +function runWithHeldSemaphores(held: ReentrantStore, fn: () => T): T { + if (!retry_context_storage) return fn() + return retry_context_storage.run(held, fn) +} + +// ─── Semaphore scope helpers ───────────────────────────────────────────────── + +let _next_instance_id = 1 +const _instance_ids = new WeakMap() + +function scopedSemaphoreKey(base_name: string, scope: 'global' | 'class' | 'instance', context: unknown): string { + if (scope === 'class' && context && typeof context === 'object') { + return `${(context as object).constructor?.name ?? 'Object'}.${base_name}` + } + if (scope === 'instance' && context && typeof context === 'object') { + let id = _instance_ids.get(context as object) + if (id === undefined) { + id = _next_instance_id++ + _instance_ids.set(context as object, id) + } + return `${id}.${base_name}` + } + return base_name +} + +// ─── Global semaphore registry ─────────────────────────────────────────────── + +const SEMAPHORE_REGISTRY = new Map() + +function getOrCreateSemaphore(name: string, limit: number): AsyncSemaphore { + const existing = SEMAPHORE_REGISTRY.get(name) + if (existing && existing.size === limit) return existing + const sem = new AsyncSemaphore(limit) + SEMAPHORE_REGISTRY.set(name, sem) + return sem +} + +/** Reset the global semaphore registry. Useful in tests. 
*/ +export function clearSemaphoreRegistry(): void { + SEMAPHORE_REGISTRY.clear() +} + +// ─── retry() decorator / higher-order wrapper ──────────────────────────────── +// +// Usage as a higher-order function (works on any async function): +// +// const fetchWithRetry = retry({ max_attempts: 3, retry_after: 1 })(async (url: string) => { +// return await fetch(url) +// }) +// +// Usage as a TC39 Stage 3 decorator on class methods (TS 5.0+): +// +// class ApiClient { +// @retry({ max_attempts: 3, retry_after: 1 }) +// async fetchData(): Promise { ... } +// } +// +// Usage on event bus handlers: +// +// bus.on(MyEvent, retry({ max_attempts: 3 })(async (event) => { +// await riskyOperation(event.data) +// })) + +export function retry(options: RetryOptions = {}) { + const { + max_attempts = 1, + retry_after = 0, + retry_backoff_factor = 1.0, + retry_on_errors, + timeout, + semaphore_limit, + semaphore_name: semaphore_name_option, + semaphore_lax = true, + semaphore_scope = 'global', + semaphore_timeout, + } = options + + return function decorator any>(target: T, _context?: ClassMethodDecoratorContext): T { + const fn_name = target.name || (_context?.name as string) || 'anonymous' + const sem_name = semaphore_name_option ?? 
fn_name + const effective_max_attempts = Math.max(1, max_attempts) + const effective_retry_after = Math.max(0, retry_after) + + async function retryWrapper(this: any, ...args: any[]): Promise { + // ── Resolve scoped semaphore key at call time (uses `this` for class/instance scopes) ── + const scoped_key = scopedSemaphoreKey(sem_name, semaphore_scope, this) + + // ── Check re-entrancy: skip semaphore if we already hold it in this async context ── + const held = getHeldSemaphores() + const needs_semaphore = semaphore_limit != null && semaphore_limit > 0 + const is_reentrant = needs_semaphore && held.has(scoped_key) + + // ── Semaphore acquisition (held across all retry attempts, skipped if re-entrant) ── + let semaphore: AsyncSemaphore | null = null + let semaphore_acquired = false + + if (needs_semaphore && !is_reentrant) { + semaphore = getOrCreateSemaphore(scoped_key, semaphore_limit!) + + const effective_sem_timeout = + semaphore_timeout != null + ? semaphore_timeout + : timeout != null + ? timeout * Math.max(1, semaphore_limit! 
- 1) + : null + + if (effective_sem_timeout != null && effective_sem_timeout > 0) { + semaphore_acquired = await acquireWithTimeout(semaphore, effective_sem_timeout * 1000) + if (!semaphore_acquired) { + if (!semaphore_lax) { + throw new SemaphoreTimeoutError( + `Failed to acquire semaphore "${scoped_key}" within ${effective_sem_timeout}s (limit=${semaphore_limit})`, + { semaphore_name: scoped_key, semaphore_limit: semaphore_limit!, timeout_seconds: effective_sem_timeout } + ) + } + // lax mode: proceed without concurrency limit + } + } else { + // No timeout configured: wait indefinitely for a slot + await semaphore.acquire() + semaphore_acquired = true + } + } + + // ── Build the set of held semaphores for nested calls ── + const new_held = new Set(held) + if (semaphore_acquired) { + new_held.add(scoped_key) + } + + // ── Retry loop (runs inside the semaphore and re-entrancy context) ── + const run_retry_loop = async (): Promise => { + for (let attempt = 1; attempt <= effective_max_attempts; attempt++) { + try { + if (timeout != null && timeout > 0) { + return await withTimeout(() => Promise.resolve(target.apply(this, args)), timeout * 1000, attempt) + } else { + return await Promise.resolve(target.apply(this, args)) + } + } catch (error) { + // Check if this error type should trigger a retry + if (retry_on_errors && retry_on_errors.length > 0) { + const is_retryable = retry_on_errors.some((matcher) => + typeof matcher === 'string' + ? (error as Error)?.name === matcher + : matcher instanceof RegExp + ? 
matcher.test(String(error)) + : error instanceof matcher + ) + if (!is_retryable) throw error + } + + // Last attempt: rethrow + if (attempt >= effective_max_attempts) throw error + + // Wait before next attempt with exponential backoff + const delay_seconds = effective_retry_after * Math.pow(retry_backoff_factor, attempt - 1) + if (delay_seconds > 0) { + await sleep(delay_seconds * 1000) + } + } + } + + // Unreachable, but satisfies the type checker + throw new Error(`retry(${fn_name}): unexpected end of retry loop`) + } + + try { + return await runWithHeldSemaphores(new_held, run_retry_loop) + } finally { + if (semaphore_acquired && semaphore) { + semaphore.release() + } + } + } + + Object.defineProperty(retryWrapper, 'name', { value: fn_name, configurable: true }) + return retryWrapper as unknown as T + } +} + +// ─── Internal helpers ──────────────────────────────────────────────────────── + +/** + * Try to acquire a semaphore within a timeout. Returns true if acquired, false if timed out. + * If the semaphore is acquired after the timeout (due to the waiter remaining queued), + * it is immediately released to avoid leaking slots. + */ +async function acquireWithTimeout(semaphore: AsyncSemaphore, timeout_ms: number): Promise { + return new Promise((resolve) => { + let settled = false + + const timer = setTimeout(() => { + if (!settled) { + settled = true + resolve(false) + } + }, timeout_ms) + + semaphore.acquire().then(() => { + if (!settled) { + settled = true + clearTimeout(timer) + resolve(true) + } else { + // Acquired after timeout fired β€” release immediately to avoid slot leak + semaphore.release() + } + }) + }) +} + +/** Run fn() with a timeout. Rejects with RetryTimeoutError if the timeout fires first. 
*/ +async function withTimeout(fn: () => Promise, timeout_ms: number, attempt: number): Promise { + return new Promise((resolve, reject) => { + let settled = false + + const timer = setTimeout(() => { + if (!settled) { + settled = true + reject( + new RetryTimeoutError(`Timed out after ${timeout_ms / 1000}s (attempt ${attempt})`, { + timeout_seconds: timeout_ms / 1000, + attempt, + }) + ) + } + }, timeout_ms) + + fn().then( + (value) => { + if (!settled) { + settled = true + clearTimeout(timer) + resolve(value) + } + }, + (error) => { + if (!settled) { + settled = true + clearTimeout(timer) + reject(error) + } + } + ) + }) +} + +function sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)) +} diff --git a/bubus-ts/src/type_inference.test.ts b/bubus-ts/src/type_inference.test.ts new file mode 100644 index 0000000..87338db --- /dev/null +++ b/bubus-ts/src/type_inference.test.ts @@ -0,0 +1,47 @@ +/* eslint-disable @typescript-eslint/no-unused-vars */ +// Do not remove the unused type/const names below; they are used to test type inference at compile time. + +import { z } from 'zod' + +import { BaseEvent } from './base_event.js' +import { EventBus } from './event_bus.js' +import type { EventResult } from './event_result.js' +import type { EventResultType } from './types.js' + +type IsEqual = (() => T extends A ? 1 : 2) extends () => T extends B ? 1 : 2 ? true : false +type Assert = T + +const InferableResultEvent = BaseEvent.extend('InferableResultEvent', { + target_id: z.string(), + event_result_schema: z.object({ ok: z.boolean() }), +}) + +type InferableResult = EventResultType> +type _assert_inferable_result = Assert> +type InferableEventResultEntry = + InstanceType['event_results'] extends Map ? TResultEntry : never +type _assert_inferable_event_result_entry = Assert< + IsEqual>> +> +type InferableEventResultValue = InferableEventResultEntry extends { result?: infer TResultValue } ? 
TResultValue : never +type _assert_inferable_event_result_value = Assert> + +const NoSchemaEvent = BaseEvent.extend('NoSchemaEventForInference', {}) +type NoSchemaResult = EventResultType> +type _assert_no_schema_result = Assert> + +const bus = new EventBus('TypeInferenceBus') + +bus.on(InferableResultEvent, (event) => { + const target: string = event.target_id + return { ok: true } +}) + +bus.on(InferableResultEvent, () => undefined) + +// @ts-expect-error non-void return must match event_result_schema for inferable event keys +bus.on(InferableResultEvent, () => 'not-ok') + +// String/wildcard keys remain best-effort and do not strongly enforce return shapes. +bus.on('InferableResultEvent', () => 'anything') +bus.on('*', () => 123) diff --git a/bubus-ts/src/types.ts b/bubus-ts/src/types.ts new file mode 100644 index 0000000..118c5ca --- /dev/null +++ b/bubus-ts/src/types.ts @@ -0,0 +1,94 @@ +import { z } from 'zod' +import type { BaseEvent } from './base_event.js' + +export type EventStatus = 'pending' | 'started' | 'completed' + +export type EventClass = { event_type?: string } & (new (...args: any[]) => T) + +export type EventKey = string | EventClass + +export type EventWithResult = BaseEvent & { __event_result_type__?: TResult } + +export type EventResultType = TEvent extends { __event_result_type__?: infer TResult } ? TResult : unknown + +export type EventHandlerFunction = ( + event: T +) => void | EventResultType | Promise> + +// For string and wildcard subscriptions we cannot reliably infer which event +// type will arrive, so return type checking intentionally degrades to unknown. 
+export type UntypedEventHandlerFunction = (event: T) => void | unknown | Promise + +export type FindWindow = boolean | number + +export type FindOptions = { + past?: FindWindow + future?: FindWindow + child_of?: BaseEvent | null +} + +const WRAPPER_TYPES = new Set(['optional', 'nullable', 'default', 'catch', 'prefault', 'readonly', 'nonoptional', 'exact_optional']) + +const OBJECT_LIKE_TYPES = new Set(['object', 'record', 'map', 'set']) + +const TYPE_ALIASES: Record = { + enum: 'string', + tuple: 'array', + void: 'undefined', + lazy: 'unknown', +} + +export const isZodSchema = (value: unknown): value is z.ZodTypeAny => !!value && typeof (value as z.ZodTypeAny).safeParse === 'function' + +export const extractZodShape = (raw: Record): z.ZodRawShape => { + const shape: Record = {} + for (const [key, value] of Object.entries(raw)) { + if (key === 'event_result_schema' || key === 'event_result_type') continue + if (isZodSchema(value)) shape[key] = value + } + return shape as z.ZodRawShape +} + +export const toJsonSchema = (schema: unknown): unknown => { + if (!schema || !isZodSchema(schema)) return schema + const zod_any = z as unknown as { toJSONSchema?: (input: z.ZodTypeAny) => unknown } + return typeof zod_any.toJSONSchema === 'function' ? zod_any.toJSONSchema(schema) : undefined +} + +export const getStringTypeName = (schema?: z.ZodTypeAny): string | undefined => { + if (!schema) return undefined + + const visited = new Set() + const infer = (value: z.ZodTypeAny): string => { + if (visited.has(value)) return 'unknown' + visited.add(value) + + const def = (value as unknown as { _def?: Record })._def ?? {} + const kind = typeof def.type === 'string' ? def.type : '' + if (!kind) return 'unknown' + + if (WRAPPER_TYPES.has(kind)) { + return isZodSchema(def.innerType) ? infer(def.innerType) : 'unknown' + } + if (kind === 'pipe') { + return isZodSchema(def.out) ? infer(def.out) : 'unknown' + } + if (kind === 'union') { + const options = (Array.isArray(def.options) ? 
def.options : []).filter(isZodSchema) + if (options.length === 0) return 'unknown' + const inferred = new Set(options.map((option) => infer(option))) + return inferred.size === 1 ? [...inferred][0] : 'unknown' + } + if (kind === 'literal') { + const literal = Array.isArray(def.values) ? def.values[0] : undefined + if (literal === null) return 'null' + if (typeof literal === 'object') return 'object' + if (typeof literal === 'function') return 'function' + return typeof literal + } + if (OBJECT_LIKE_TYPES.has(kind)) return 'object' + return TYPE_ALIASES[kind] ?? kind + } + + return infer(schema) +} diff --git a/bubus-ts/tests/_perf_profile.ts b/bubus-ts/tests/_perf_profile.ts new file mode 100644 index 0000000..327f5bf --- /dev/null +++ b/bubus-ts/tests/_perf_profile.ts @@ -0,0 +1,60 @@ +import { BaseEvent, EventBus } from '../src/index.js' + +const SimpleEvent = BaseEvent.extend('SimpleEvent', {}) + +const total_events = 200_000 +// Keep full history to avoid trimming inflight events during perf profiling. 
+const bus = new EventBus('PerfBus', { max_history_size: total_events }) + +let processed_count = 0 +bus.on(SimpleEvent, () => { + processed_count += 1 +}) + +// Baseline memory +global.gc?.() +const mem_before = process.memoryUsage() +console.log(`Memory before: RSS=${(mem_before.rss / 1024 / 1024).toFixed(1)}MB, Heap=${(mem_before.heapUsed / 1024 / 1024).toFixed(1)}MB`) + +// Phase 1: Dispatch all events (measure dispatch throughput) +const t0 = performance.now() +const pending: Array> = [] +for (let i = 0; i < total_events; i++) { + pending.push(bus.dispatch(SimpleEvent({}))) +} +const t1 = performance.now() +console.log(`Dispatch ${total_events} events: ${(t1 - t0).toFixed(0)}ms (${(total_events / ((t1 - t0) / 1000)).toFixed(0)} events/s)`) + +const mem_after_dispatch = process.memoryUsage() +console.log( + `Memory after dispatch: RSS=${(mem_after_dispatch.rss / 1024 / 1024).toFixed(1)}MB, Heap=${(mem_after_dispatch.heapUsed / 1024 / 1024).toFixed(1)}MB` +) + +// Phase 2: Wait for all to complete +const t2 = performance.now() +await Promise.all(pending.map((e) => e.done())) +await bus.waitUntilIdle() +const t3 = performance.now() +console.log(`Await completion: ${(t3 - t2).toFixed(0)}ms`) +console.log(`Total: ${(t3 - t0).toFixed(0)}ms (${(total_events / ((t3 - t0) / 1000)).toFixed(0)} events/s)`) + +const mem_after = process.memoryUsage() +console.log( + `Memory after complete: RSS=${(mem_after.rss / 1024 / 1024).toFixed(1)}MB, Heap=${(mem_after.heapUsed / 1024 / 1024).toFixed(1)}MB` +) + +global.gc?.() +const mem_gc = process.memoryUsage() +console.log(`Memory after GC: RSS=${(mem_gc.rss / 1024 / 1024).toFixed(1)}MB, Heap=${(mem_gc.heapUsed / 1024 / 1024).toFixed(1)}MB`) + +const total_ms = t3 - t0 +console.log( + `Per-event: time=${(total_ms / total_events).toFixed(4)}ms, ` + + `heap=${((mem_after.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB, ` + + `heap_gc=${((mem_gc.heapUsed - mem_before.heapUsed) / total_events / 
1024).toFixed(2)}KB` +) + +console.log(`\nProcessed: ${processed_count}/${total_events}`) +console.log(`History size: ${bus.event_history.size} (max: ${bus.max_history_size})`) +console.log(`Heap delta (before GC): +${((mem_after.heapUsed - mem_before.heapUsed) / 1024 / 1024).toFixed(1)}MB`) +console.log(`Heap delta (after GC): +${((mem_gc.heapUsed - mem_before.heapUsed) / 1024 / 1024).toFixed(1)}MB`) diff --git a/bubus-ts/tests/comprehensive_patterns.test.ts b/bubus-ts/tests/comprehensive_patterns.test.ts new file mode 100644 index 0000000..571cc06 --- /dev/null +++ b/bubus-ts/tests/comprehensive_patterns.test.ts @@ -0,0 +1,1208 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus } from '../src/index.js' + +const ParentEvent = BaseEvent.extend('ParentEvent', {}) +const ImmediateChildEvent = BaseEvent.extend('ImmediateChildEvent', {}) +const QueuedChildEvent = BaseEvent.extend('QueuedChildEvent', {}) + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +test('comprehensive patterns: forwarding, async/sync dispatch, parent tracking', async () => { + const bus_1 = new EventBus('bus1') + const bus_2 = new EventBus('bus2') + + const results: Array<[number, string]> = [] + const execution_counter = { count: 0 } + + const child_bus2_event_handler = (event: BaseEvent): string => { + execution_counter.count += 1 + const seq = execution_counter.count + const event_type_short = event.event_type.replace(/Event$/, '') + results.push([seq, `bus2_handler_${event_type_short}`]) + return 'forwarded bus result' + } + + bus_2.on('*', child_bus2_event_handler) + bus_1.on('*', bus_2.dispatch) + + const parent_bus1_handler = async (event: BaseEvent): Promise => { + execution_counter.count += 1 + const seq = execution_counter.count + results.push([seq, 'parent_start']) + + const child_event_async = event.bus?.emit(QueuedChildEvent({}))! 
+ assert.notEqual(child_event_async.event_status, 'completed') + + const child_event_sync = await event.bus?.emit(ImmediateChildEvent({})).done()! + assert.equal(child_event_sync.event_status, 'completed') + + assert.ok(child_event_sync.event_path.includes('bus2')) + assert.ok(Array.from(child_event_sync.event_results.values()).some((result) => result.handler_name.includes('dispatch'))) + + assert.equal(child_event_async.event_parent_id, event.event_id) + assert.equal(child_event_sync.event_parent_id, event.event_id) + + execution_counter.count += 1 + const end_seq = execution_counter.count + results.push([end_seq, 'parent_end']) + return 'parent_done' + } + + bus_1.on(ParentEvent, parent_bus1_handler) + + const parent_event = bus_1.dispatch(ParentEvent({})) + await parent_event.done() + await bus_1.waitUntilIdle() + await bus_2.waitUntilIdle() + + const event_children = Array.from(bus_1.event_history.values()).filter( + (event) => event.event_type === 'ImmediateChildEvent' || event.event_type === 'QueuedChildEvent' + ) + assert.ok(event_children.length > 0) + assert.ok(event_children.every((event) => event.event_parent_id === parent_event.event_id)) + + const sorted_results = results.slice().sort((a, b) => a[0] - b[0]) + const execution_order = sorted_results.map((item) => item[1]) + + assert.equal(execution_order[0], 'parent_start') + assert.ok(execution_order.includes('bus2_handler_ImmediateChild')) + + if (execution_order.includes('parent_end')) { + const parent_end_idx = execution_order.indexOf('parent_end') + assert.ok(parent_end_idx > 1) + } + + assert.equal(execution_order.filter((value) => value === 'bus2_handler_ImmediateChild').length, 1) + assert.equal(execution_order.filter((value) => value === 'bus2_handler_QueuedChild').length, 1) + assert.equal(execution_order.filter((value) => value === 'bus2_handler_Parent').length, 1) +}) + +test('race condition stress', async () => { + const bus_1 = new EventBus('bus1') + const bus_2 = new EventBus('bus2') + 
const RootEvent = BaseEvent.extend('RootEvent', {}) + + const results: string[] = [] + + const child_handler = async (event: BaseEvent): Promise => { + const bus_name = event.event_path[event.event_path.length - 1] ?? 'unknown' + results.push(`child_${bus_name}`) + await delay(1) + return `child_done_${bus_name}` + } + + const parent_handler = async (event: BaseEvent): Promise => { + const children: BaseEvent[] = [] + + for (let i = 0; i < 3; i += 1) { + children.push(event.bus?.emit(QueuedChildEvent({}))!) + } + + for (let i = 0; i < 3; i += 1) { + const child = await event.bus?.emit(ImmediateChildEvent({})).done()! + assert.equal(child.event_status, 'completed') + children.push(child) + } + + assert.ok(children.every((child) => child.event_parent_id === event.event_id)) + return 'parent_done' + } + + const bad_handler = (_bad: BaseEvent): void => {} + + bus_1.on('*', bus_2.dispatch) + bus_1.on(QueuedChildEvent, child_handler) + bus_1.on(ImmediateChildEvent, child_handler) + bus_2.on(QueuedChildEvent, child_handler) + bus_2.on(ImmediateChildEvent, child_handler) + bus_1.on(RootEvent, parent_handler) + bus_1.on(RootEvent, bad_handler) + + for (let run = 0; run < 5; run += 1) { + results.length = 0 + + const event = bus_1.dispatch(RootEvent({})) + await event.done() + await bus_1.waitUntilIdle() + await bus_2.waitUntilIdle() + + assert.equal( + results.filter((value) => value === 'child_bus1').length, + 6, + `Run ${run}: Expected 6 child_bus1, got ${results.filter((value) => value === 'child_bus1').length}` + ) + assert.equal( + results.filter((value) => value === 'child_bus2').length, + 6, + `Run ${run}: Expected 6 child_bus2, got ${results.filter((value) => value === 'child_bus2').length}` + ) + } +}) + +test('awaited child jumps queue without overshoot', async () => { + const bus = new EventBus('TestBus', { max_history_size: 100 }) + const execution_order: string[] = [] + const debug_order: Array<{ label: string; at: string }> = [] + + const Event1 = 
BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const Event3 = BaseEvent.extend('Event3', {}) + const LocalChildEvent = BaseEvent.extend('ChildEvent', {}) + + const event1_handler = async (_event: BaseEvent): Promise => { + execution_order.push('Event1_start') + debug_order.push({ label: 'Event1_start', at: new Date().toISOString() }) + const child = _event.bus?.emit(LocalChildEvent({}))! + execution_order.push('Child_dispatched') + debug_order.push({ label: 'Child_dispatched', at: new Date().toISOString() }) + await child.done() + execution_order.push('Child_await_returned') + debug_order.push({ label: 'Child_await_returned', at: new Date().toISOString() }) + execution_order.push('Event1_end') + debug_order.push({ label: 'Event1_end', at: new Date().toISOString() }) + return 'event1_done' + } + + const event2_handler = async (): Promise => { + execution_order.push('Event2_start') + debug_order.push({ label: 'Event2_start', at: new Date().toISOString() }) + execution_order.push('Event2_end') + debug_order.push({ label: 'Event2_end', at: new Date().toISOString() }) + return 'event2_done' + } + + const event3_handler = async (): Promise => { + execution_order.push('Event3_start') + debug_order.push({ label: 'Event3_start', at: new Date().toISOString() }) + execution_order.push('Event3_end') + debug_order.push({ label: 'Event3_end', at: new Date().toISOString() }) + return 'event3_done' + } + + const child_handler = async (): Promise => { + execution_order.push('Child_start') + debug_order.push({ label: 'Child_start', at: new Date().toISOString() }) + execution_order.push('Child_end') + debug_order.push({ label: 'Child_end', at: new Date().toISOString() }) + return 'child_done' + } + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(Event3, event3_handler) + bus.on(LocalChildEvent, child_handler) + + const event_1 = bus.dispatch(Event1({})) + const event_2 = bus.dispatch(Event2({})) + const event_3 = 
bus.dispatch(Event3({})) + + // Wait for everything to complete + await event_1.done() + await bus.waitUntilIdle() + + // Core assertion: child jumped the queue and ran DURING Event1's handler + assert.ok(execution_order.includes('Child_start')) + assert.ok(execution_order.includes('Child_end')) + const child_start_idx = execution_order.indexOf('Child_start') + const child_end_idx = execution_order.indexOf('Child_end') + const event1_end_idx = execution_order.indexOf('Event1_end') + assert.ok(child_start_idx < event1_end_idx, 'child must start before Event1 handler returns') + assert.ok(child_end_idx < event1_end_idx, 'child must end before Event1 handler returns') + + // No overshoot: Event2 and Event3 must only start AFTER Event1's handler fully completes. + // In JS, the microtask-based runloop processes them after Event1 completes (so they may + // already be done by this point), but the key guarantee is ordering, not timing. + const event2_start_idx = execution_order.indexOf('Event2_start') + const event3_start_idx = execution_order.indexOf('Event3_start') + assert.ok(event2_start_idx > event1_end_idx, 'Event2 must not start until Event1 handler returns') + assert.ok(event3_start_idx > event1_end_idx, 'Event3 must not start until Event1 handler returns') + + // FIFO preserved among queued events + assert.ok(event2_start_idx < event3_start_idx, 'Event2 must start before Event3 (FIFO)') + + // All events completed + assert.equal(event_1.event_status, 'completed') + assert.equal(event_2.event_status, 'completed') + assert.equal(event_3.event_status, 'completed') + + // Timestamp ordering confirms the same + const history_list = Array.from(bus.event_history.values()) + const child_event = history_list.find((event) => event.event_type === 'ChildEvent') + const event2_from_history = history_list.find((event) => event.event_type === 'Event2') + const event3_from_history = history_list.find((event) => event.event_type === 'Event3') + + 
assert.ok(child_event?.event_started_ts !== undefined) + assert.ok(event2_from_history?.event_started_ts !== undefined) + assert.ok(event3_from_history?.event_started_ts !== undefined) + + assert.ok(child_event!.event_started_ts! <= event2_from_history!.event_started_ts!) + assert.ok(child_event!.event_started_ts! <= event3_from_history!.event_started_ts!) +}) + +test('done() on non-proxied event keeps bus paused during queue-jump', async () => { + const bus = new EventBus('RawDoneBus', { max_history_size: 100 }) + const Event1 = BaseEvent.extend('Event1', {}) + const ChildEvent = BaseEvent.extend('RawChild', {}) + + let paused_after_done = false + + bus.on(ChildEvent, () => {}) + + bus.on(Event1, async (_event) => { + // Dispatch child via the raw bus (not the proxied event.bus) + const child = bus.dispatch(ChildEvent({})) + // Get the raw (non-proxied) event + const raw_child = child._event_original ?? child + // done() on raw event bypasses handler_result injection from proxy + await raw_child.done() + // After done() returns, bus should still be paused because + // we're still inside a handler doing queue-jump processing + paused_after_done = bus.locks.isPaused() + }) + + bus.dispatch(Event1({})) + await bus.waitUntilIdle() + + assert.equal(paused_after_done, true, 'bus should be paused after raw done() but before handler returns') +}) + +test('bus pause state clears after queue-jump completes', async () => { + const bus = new EventBus('DepthBalanceBus', { max_history_size: 100 }) + const Event1 = BaseEvent.extend('DepthEvent1', {}) + const ChildA = BaseEvent.extend('DepthChildA', {}) + const ChildB = BaseEvent.extend('DepthChildB', {}) + + let paused_during_handler = false + let paused_between_dones = false + let paused_after_second_done = false + + bus.on(ChildA, () => {}) + bus.on(ChildB, () => {}) + + bus.on(Event1, async (event) => { + // First queue-jump + const child_a = event.bus?.emit(ChildA({}))! 
+ await child_a.done() + paused_during_handler = bus.locks.isPaused() + + // Second queue-jump β€” bus should remain paused across both awaits. + const child_b = event.bus?.emit(ChildB({}))! + paused_between_dones = bus.locks.isPaused() + await child_b.done() + paused_after_second_done = bus.locks.isPaused() + }) + + bus.dispatch(Event1({})) + await bus.waitUntilIdle() + + // During handler, pause should still be held. + assert.equal(paused_during_handler, true, 'bus should remain paused after first done()') + + // Between done() calls, pause should still be held. + assert.equal(paused_between_dones, true, 'bus should remain paused between done() calls') + + // After second done(), pause is still held until handler returns. + assert.equal(paused_after_second_done, true, 'bus should remain paused after second done()') + + // After handler finishes and bus is idle, pause must be released. + assert.equal(bus.locks.isPaused(), false, 'bus should no longer be paused after handler completes') +}) + +test('isInsideHandler() is per-bus, not global', async () => { + const bus_a = new EventBus('InsideHandlerA', { max_history_size: 100 }) + const bus_b = new EventBus('InsideHandlerB', { max_history_size: 100 }) + + const EventA = BaseEvent.extend('InsideHandlerEventA', {}) + const EventB = BaseEvent.extend('InsideHandlerEventB', {}) + + let bus_a_inside_during_a_handler = false + let bus_b_inside_during_a_handler = false + let bus_a_inside_during_b_handler = false + let bus_b_inside_during_b_handler = false + + bus_a.on(EventA, () => { + bus_a_inside_during_a_handler = bus_a.locks.isAnyHandlerActive() + bus_b_inside_during_a_handler = bus_b.locks.isAnyHandlerActive() + }) + + bus_b.on(EventB, () => { + bus_a_inside_during_b_handler = bus_a.locks.isAnyHandlerActive() + bus_b_inside_during_b_handler = bus_b.locks.isAnyHandlerActive() + }) + + // Dispatch to bus_a first, wait for completion so bus_b has no active handlers + await bus_a.dispatch(EventA({})).done() + await 
bus_a.waitUntilIdle() + + // Then dispatch to bus_b so bus_a has no active handlers + await bus_b.dispatch(EventB({})).done() + await bus_b.waitUntilIdle() + + // During bus_a's handler: bus_a should report inside, bus_b should not + assert.equal(bus_a_inside_during_a_handler, true, 'bus_a.locks.isAnyHandlerActive() should be true during bus_a handler') + assert.equal(bus_b_inside_during_a_handler, false, 'bus_b.locks.isAnyHandlerActive() should be false during bus_a handler') + + // During bus_b's handler: bus_b should report inside, bus_a should not + assert.equal(bus_b_inside_during_b_handler, true, 'bus_b.locks.isAnyHandlerActive() should be true during bus_b handler') + assert.equal(bus_a_inside_during_b_handler, false, 'bus_a.locks.isAnyHandlerActive() should be false during bus_b handler') + + // After all handlers complete, neither bus should report inside + assert.equal(bus_a.locks.isAnyHandlerActive(), false, 'bus_a.locks.isAnyHandlerActive() should be false after idle') + assert.equal(bus_b.locks.isAnyHandlerActive(), false, 'bus_b.locks.isAnyHandlerActive() should be false after idle') +}) + +test('dispatch multiple, await one skips others until after handler completes', async () => { + const bus = new EventBus('MultiDispatchBus', { max_history_size: 100 }) + const execution_order: string[] = [] + + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const Event3 = BaseEvent.extend('Event3', {}) + const ChildA = BaseEvent.extend('ChildA', {}) + const ChildB = BaseEvent.extend('ChildB', {}) + const ChildC = BaseEvent.extend('ChildC', {}) + + const event1_handler = async (event: BaseEvent): Promise => { + execution_order.push('Event1_start') + + event.bus?.emit(ChildA({})) + execution_order.push('ChildA_dispatched') + + const child_b = event.bus?.emit(ChildB({}))! 
+ execution_order.push('ChildB_dispatched') + + event.bus?.emit(ChildC({})) + execution_order.push('ChildC_dispatched') + + await child_b.done() + execution_order.push('ChildB_await_returned') + + execution_order.push('Event1_end') + return 'event1_done' + } + + const event2_handler = async (): Promise => { + execution_order.push('Event2_start') + execution_order.push('Event2_end') + return 'event2_done' + } + + const event3_handler = async (): Promise => { + execution_order.push('Event3_start') + execution_order.push('Event3_end') + return 'event3_done' + } + + const child_a_handler = async (): Promise => { + execution_order.push('ChildA_start') + execution_order.push('ChildA_end') + return 'child_a_done' + } + + const child_b_handler = async (): Promise => { + execution_order.push('ChildB_start') + execution_order.push('ChildB_end') + return 'child_b_done' + } + + const child_c_handler = async (): Promise => { + execution_order.push('ChildC_start') + execution_order.push('ChildC_end') + return 'child_c_done' + } + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(Event3, event3_handler) + bus.on(ChildA, child_a_handler) + bus.on(ChildB, child_b_handler) + bus.on(ChildC, child_c_handler) + + const event_1 = bus.dispatch(Event1({})) + bus.dispatch(Event2({})) + bus.dispatch(Event3({})) + + await event_1.done() + + assert.ok(execution_order.includes('ChildB_start')) + assert.ok(execution_order.includes('ChildB_end')) + + const child_b_end_idx = execution_order.indexOf('ChildB_end') + const event1_end_idx = execution_order.indexOf('Event1_end') + assert.ok(child_b_end_idx < event1_end_idx) + + if (execution_order.includes('ChildA_start')) { + const child_a_start_idx = execution_order.indexOf('ChildA_start') + assert.ok(child_a_start_idx > event1_end_idx) + } + if (execution_order.includes('ChildC_start')) { + const child_c_start_idx = execution_order.indexOf('ChildC_start') + assert.ok(child_c_start_idx > event1_end_idx) + } + if 
(execution_order.includes('Event2_start')) { + const event2_start_idx = execution_order.indexOf('Event2_start') + assert.ok(event2_start_idx > event1_end_idx) + } + if (execution_order.includes('Event3_start')) { + const event3_start_idx = execution_order.indexOf('Event3_start') + assert.ok(event3_start_idx > event1_end_idx) + } + + await bus.waitUntilIdle() + + const event2_start_idx = execution_order.indexOf('Event2_start') + const event3_start_idx = execution_order.indexOf('Event3_start') + const child_a_start_idx = execution_order.indexOf('ChildA_start') + const child_c_start_idx = execution_order.indexOf('ChildC_start') + + assert.ok(event2_start_idx < event3_start_idx) + assert.ok(event3_start_idx < child_a_start_idx) + assert.ok(child_a_start_idx < child_c_start_idx) +}) + +test('multi-bus queues are independent when awaiting child', async () => { + const bus_1 = new EventBus('Bus1', { max_history_size: 100 }) + const bus_2 = new EventBus('Bus2', { max_history_size: 100 }) + const execution_order: string[] = [] + + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const Event3 = BaseEvent.extend('Event3', {}) + const Event4 = BaseEvent.extend('Event4', {}) + const LocalChildEvent = BaseEvent.extend('ChildEvent', {}) + + const event1_handler = async (event: BaseEvent): Promise => { + execution_order.push('Bus1_Event1_start') + const child = event.bus?.emit(LocalChildEvent({}))! 
+ execution_order.push('Child_dispatched_to_Bus1') + await child.done() + execution_order.push('Child_await_returned') + execution_order.push('Bus1_Event1_end') + return 'event1_done' + } + + const event2_handler = async (): Promise => { + execution_order.push('Bus1_Event2_start') + execution_order.push('Bus1_Event2_end') + return 'event2_done' + } + + const event3_handler = async (): Promise => { + execution_order.push('Bus2_Event3_start') + execution_order.push('Bus2_Event3_end') + return 'event3_done' + } + + const event4_handler = async (): Promise => { + execution_order.push('Bus2_Event4_start') + execution_order.push('Bus2_Event4_end') + return 'event4_done' + } + + const child_handler = async (): Promise => { + execution_order.push('Child_start') + execution_order.push('Child_end') + return 'child_done' + } + + bus_1.on(Event1, event1_handler) + bus_1.on(Event2, event2_handler) + bus_1.on(LocalChildEvent, child_handler) + + bus_2.on(Event3, event3_handler) + bus_2.on(Event4, event4_handler) + + const event_1 = bus_1.dispatch(Event1({})) + bus_1.dispatch(Event2({})) + bus_2.dispatch(Event3({})) + bus_2.dispatch(Event4({})) + + await delay(0) + + await event_1.done() + + assert.ok(execution_order.includes('Child_start')) + assert.ok(execution_order.includes('Child_end')) + + const child_end_idx = execution_order.indexOf('Child_end') + const event1_end_idx = execution_order.indexOf('Bus1_Event1_end') + assert.ok(child_end_idx < event1_end_idx) + + const bus1_event2_start_idx = execution_order.indexOf('Bus1_Event2_start') + if (bus1_event2_start_idx !== -1) { + assert.ok(bus1_event2_start_idx > event1_end_idx) + } + + const bus2_event3_start_idx = execution_order.indexOf('Bus2_Event3_start') + const bus2_event4_start_idx = execution_order.indexOf('Bus2_Event4_start') + assert.ok(bus2_event3_start_idx !== -1 || bus2_event4_start_idx !== -1) + const bus2_start_idx = + bus2_event3_start_idx === -1 + ? bus2_event4_start_idx + : bus2_event4_start_idx === -1 + ? 
bus2_event3_start_idx + : Math.min(bus2_event3_start_idx, bus2_event4_start_idx) + assert.ok(bus2_start_idx < event1_end_idx) + + await bus_1.waitUntilIdle() + await bus_2.waitUntilIdle() + + assert.ok(execution_order.includes('Bus1_Event2_start')) + assert.ok(execution_order.includes('Bus2_Event3_start')) + assert.ok(execution_order.includes('Bus2_Event4_start')) +}) + +test('awaiting an already completed event is a no-op', async () => { + const bus = new EventBus('AlreadyCompletedBus', { max_history_size: 100 }) + const execution_order: string[] = [] + + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + + const event1_handler = async (): Promise => { + execution_order.push('Event1_start') + execution_order.push('Event1_end') + return 'event1_done' + } + + const event2_handler = async (): Promise => { + execution_order.push('Event2_start') + execution_order.push('Event2_end') + return 'event2_done' + } + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + + const event_1 = await bus.dispatch(Event1({})).done() + assert.equal(event_1.event_status, 'completed') + + const event_2 = bus.dispatch(Event2({})) + + await event_1.done() + + assert.equal(event_2.event_status, 'pending') + + await bus.waitUntilIdle() +}) + +test('multiple awaits on same event', async () => { + const bus = new EventBus('MultiAwaitBus', { max_history_size: 100 }) + const execution_order: string[] = [] + const await_results: string[] = [] + + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const LocalChildEvent = BaseEvent.extend('ChildEvent', {}) + + const event1_handler = async (event: BaseEvent): Promise => { + execution_order.push('Event1_start') + + const child = event.bus?.emit(LocalChildEvent({}))! 
+ + const await_child = async (name: string): Promise => { + await child.done() + await_results.push(`${name}_completed`) + } + + await Promise.all([await_child('await1'), await_child('await2')]) + execution_order.push('Both_awaits_completed') + execution_order.push('Event1_end') + return 'event1_done' + } + + const event2_handler = async (): Promise => { + execution_order.push('Event2_start') + execution_order.push('Event2_end') + return 'event2_done' + } + + const child_handler = async (): Promise => { + execution_order.push('Child_start') + await delay(10) + execution_order.push('Child_end') + return 'child_done' + } + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(LocalChildEvent, child_handler) + + const event_1 = bus.dispatch(Event1({})) + bus.dispatch(Event2({})) + + await event_1.done() + + assert.equal(await_results.length, 2) + assert.ok(await_results.includes('await1_completed')) + assert.ok(await_results.includes('await2_completed')) + + assert.ok(execution_order.includes('Child_start')) + assert.ok(execution_order.includes('Child_end')) + const child_end_idx = execution_order.indexOf('Child_end') + const event1_end_idx = execution_order.indexOf('Event1_end') + assert.ok(child_end_idx < event1_end_idx) + + assert.ok(!execution_order.includes('Event2_start')) + + await bus.waitUntilIdle() +}) + +test('deeply nested awaited children', async () => { + const bus = new EventBus('DeepNestedBus', { max_history_size: 100 }) + const execution_order: string[] = [] + + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const Child1 = BaseEvent.extend('Child1', {}) + const Child2 = BaseEvent.extend('Child2', {}) + + const event1_handler = async (event: BaseEvent): Promise => { + execution_order.push('Event1_start') + const child1 = event.bus?.emit(Child1({}))! 
+ await child1.done() + execution_order.push('Event1_end') + return 'event1_done' + } + + const child1_handler = async (event: BaseEvent): Promise => { + execution_order.push('Child1_start') + const child2 = event.bus?.emit(Child2({}))! + await child2.done() + execution_order.push('Child1_end') + return 'child1_done' + } + + const child2_handler = async (): Promise => { + execution_order.push('Child2_start') + execution_order.push('Child2_end') + return 'child2_done' + } + + const event2_handler = async (): Promise => { + execution_order.push('Event2_start') + execution_order.push('Event2_end') + return 'event2_done' + } + + bus.on(Event1, event1_handler) + bus.on(Child1, child1_handler) + bus.on(Child2, child2_handler) + bus.on(Event2, event2_handler) + + const event_1 = bus.dispatch(Event1({})) + bus.dispatch(Event2({})) + + await event_1.done() + + assert.ok(execution_order.includes('Child1_start')) + assert.ok(execution_order.includes('Child1_end')) + assert.ok(execution_order.includes('Child2_start')) + assert.ok(execution_order.includes('Child2_end')) + + const child2_end_idx = execution_order.indexOf('Child2_end') + const child1_end_idx = execution_order.indexOf('Child1_end') + const event1_end_idx = execution_order.indexOf('Event1_end') + assert.ok(child2_end_idx < child1_end_idx) + assert.ok(child1_end_idx < event1_end_idx) + + assert.ok(!execution_order.includes('Event2_start')) + + await bus.waitUntilIdle() + + const event2_start_idx = execution_order.indexOf('Event2_start') + assert.ok(event2_start_idx > event1_end_idx) +}) + +// ============================================================================= +// Queue-Jump Concurrency Tests (Two-Bus) +// +// BUG: runImmediatelyAcrossBuses passes { bypass_handler_semaphores: true, +// bypass_event_semaphores: true } for ALL buses. This causes: +// 1. Handlers to run in parallel regardless of configured concurrency +// 2. 
Event semaphores on remote buses to be skipped +// +// The fix requires "yield-and-reacquire": +// - Before processing the child, temporarily RELEASE the semaphore the parent +// handler holds (the parent is suspended in `await child.done()` and isn't +// using it). +// - Process the child event NORMALLY β€” handlers acquire/release the real +// semaphore, serializing among themselves as configured. +// - After the child completes, RE-ACQUIRE the semaphore for the parent handler +// before it resumes. +// +// For event semaphores, only bypass on the initiating bus (where the parent holds +// the semaphore). On other buses, respect their event concurrency β€” bypass only +// if they resolve to the SAME semaphore instance (i.e. global-serial). +// +// All tests use two buses. The pattern is: +// bus_a: origin bus where TriggerEvent handler dispatches a child +// bus_b: forward bus that also handles the child event +// The trigger handler dispatches the child on bus_a and also to bus_b, +// then awaits child.done(), which queue-jumps the child on both buses. +// ============================================================================= + +test('BUG: queue-jump two-bus bus-serial handlers should serialize on each bus', async () => { + const TriggerEvent = BaseEvent.extend('QJ2BS_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJ2BS_Child', {}) + + const bus_a = new EventBus('QJ2BS_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', + }) + const bus_b = new EventBus('QJ2BS_B', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', + }) + + const log: string[] = [] + + // Two handlers per bus. handler_1 is slow (15ms), handler_2 is fast (5ms). + // With bus-serial, handler_1 must finish before handler_2 starts ON EACH BUS. + // With buggy parallel, both start simultaneously and handler_2 finishes first. 
+ const a_handler_1 = async () => { + log.push('a1_start') + await delay(15) + log.push('a1_end') + } + const a_handler_2 = async () => { + log.push('a2_start') + await delay(5) + log.push('a2_end') + } + const b_handler_1 = async () => { + log.push('b1_start') + await delay(15) + log.push('b1_end') + } + const b_handler_2 = async () => { + log.push('b2_start') + await delay(5) + log.push('b2_end') + } + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.dispatch(child) + await child.done() + }) + bus_a.on(ChildEvent, a_handler_1) + bus_a.on(ChildEvent, a_handler_2) + bus_b.on(ChildEvent, b_handler_1) + bus_b.on(ChildEvent, b_handler_2) + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + // Bus A: handlers must serialize (a1 finishes before a2 starts) + const a1_end = log.indexOf('a1_end') + const a2_start = log.indexOf('a2_start') + assert.ok(a1_end >= 0 && a2_start >= 0, 'bus_a handlers should have run') + assert.ok(a1_end < a2_start, `bus_a (bus-serial): a1 should finish before a2 starts. Got: [${log.join(', ')}]`) + + // Bus B: handlers must serialize (b1 finishes before b2 starts) + const b1_end = log.indexOf('b1_end') + const b2_start = log.indexOf('b2_start') + assert.ok(b1_end >= 0 && b2_start >= 0, 'bus_b handlers should have run') + assert.ok(b1_end < b2_start, `bus_b (bus-serial): b1 should finish before b2 starts. Got: [${log.join(', ')}]`) +}) + +test('BUG: queue-jump two-bus global-serial handlers should serialize across both buses', async () => { + const TriggerEvent = BaseEvent.extend('QJ2GS_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJ2GS_Child', {}) + + // Global-serial means ONE handler at a time GLOBALLY, across all buses. 
+ const bus_a = new EventBus('QJ2GS_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'global-serial', + }) + const bus_b = new EventBus('QJ2GS_B', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'global-serial', + }) + + const log: string[] = [] + + const a_handler_1 = async () => { + log.push('a1_start') + await delay(15) + log.push('a1_end') + } + const a_handler_2 = async () => { + log.push('a2_start') + await delay(5) + log.push('a2_end') + } + const b_handler_1 = async () => { + log.push('b1_start') + await delay(15) + log.push('b1_end') + } + const b_handler_2 = async () => { + log.push('b2_start') + await delay(5) + log.push('b2_end') + } + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.dispatch(child) + await child.done() + }) + bus_a.on(ChildEvent, a_handler_1) + bus_a.on(ChildEvent, a_handler_2) + bus_b.on(ChildEvent, b_handler_1) + bus_b.on(ChildEvent, b_handler_2) + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + // With global-serial, no two handlers should overlap anywhere. + // runImmediatelyAcrossBuses processes buses sequentially (bus_a first, + // then bus_b), so the expected order is strictly serial: + // a1_start, a1_end, a2_start, a2_end, b1_start, b1_end, b2_start, b2_end + // + // With the bug (bypass), all handlers on a bus run in parallel: + // a1_start, a2_start, a2_end, a1_end, b1_start, b2_start, b2_end, b1_end + + // Check: within bus_a, handlers are serial + const a1_end = log.indexOf('a1_end') + const a2_start = log.indexOf('a2_start') + assert.ok(a1_end < a2_start, `global-serial: a1 should finish before a2 starts. 
Got: [${log.join(', ')}]`) + + // Check: within bus_b, handlers are serial + const b1_end = log.indexOf('b1_end') + const b2_start = log.indexOf('b2_start') + assert.ok(b1_end < b2_start, `global-serial: b1 should finish before b2 starts. Got: [${log.join(', ')}]`) + + // Check: bus_a handlers all finish before bus_b handlers start + // (because runImmediatelyAcrossBuses processes sequentially and + // all share LockManager.global_handler_semaphore) + const a2_end = log.indexOf('a2_end') + const b1_start = log.indexOf('b1_start') + assert.ok(a2_end < b1_start, `global-serial: bus_a should finish before bus_b starts. Got: [${log.join(', ')}]`) +}) + +test('BUG: queue-jump two-bus mixed: bus_a bus-serial, bus_b parallel', async () => { + const TriggerEvent = BaseEvent.extend('QJ2Mix1_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJ2Mix1_Child', {}) + + const bus_a = new EventBus('QJ2Mix1_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', + }) + const bus_b = new EventBus('QJ2Mix1_B', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'parallel', // bus_b handlers should run in parallel + }) + + const log: string[] = [] + + const a_handler_1 = async () => { + log.push('a1_start') + await delay(15) + log.push('a1_end') + } + const a_handler_2 = async () => { + log.push('a2_start') + await delay(5) + log.push('a2_end') + } + const b_handler_1 = async () => { + log.push('b1_start') + await delay(15) + log.push('b1_end') + } + const b_handler_2 = async () => { + log.push('b2_start') + await delay(5) + log.push('b2_end') + } + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! 
+ bus_b.dispatch(child) + await child.done() + }) + bus_a.on(ChildEvent, a_handler_1) + bus_a.on(ChildEvent, a_handler_2) + bus_b.on(ChildEvent, b_handler_1) + bus_b.on(ChildEvent, b_handler_2) + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + // Bus A (bus-serial): a1 must finish before a2 starts + const a1_end = log.indexOf('a1_end') + const a2_start = log.indexOf('a2_start') + assert.ok(a1_end < a2_start, `bus_a (bus-serial): a1 should finish before a2 starts. Got: [${log.join(', ')}]`) + + // Bus B (parallel): both handlers should start before the slower one finishes. + // b2 (5ms) starts and finishes before b1 (15ms) finishes. + const b1_end = log.indexOf('b1_end') + const b2_start = log.indexOf('b2_start') + assert.ok(b2_start < b1_end, `bus_b (parallel): b2 should start before b1 finishes. Got: [${log.join(', ')}]`) +}) + +test('BUG: queue-jump two-bus mixed: bus_a parallel, bus_b bus-serial', async () => { + const TriggerEvent = BaseEvent.extend('QJ2Mix2_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJ2Mix2_Child', {}) + + const bus_a = new EventBus('QJ2Mix2_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'parallel', // bus_a handlers should run in parallel + }) + const bus_b = new EventBus('QJ2Mix2_B', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', + }) + + const log: string[] = [] + + const a_handler_1 = async () => { + log.push('a1_start') + await delay(15) + log.push('a1_end') + } + const a_handler_2 = async () => { + log.push('a2_start') + await delay(5) + log.push('a2_end') + } + const b_handler_1 = async () => { + log.push('b1_start') + await delay(15) + log.push('b1_end') + } + const b_handler_2 = async () => { + log.push('b2_start') + await delay(5) + log.push('b2_end') + } + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ 
event_timeout: null }))! + bus_b.dispatch(child) + await child.done() + }) + bus_a.on(ChildEvent, a_handler_1) + bus_a.on(ChildEvent, a_handler_2) + bus_b.on(ChildEvent, b_handler_1) + bus_b.on(ChildEvent, b_handler_2) + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + // Bus A (parallel): handlers should overlap + const a1_end = log.indexOf('a1_end') + const a2_start = log.indexOf('a2_start') + assert.ok(a2_start < a1_end, `bus_a (parallel): a2 should start before a1 finishes. Got: [${log.join(', ')}]`) + + // Bus B (bus-serial): b1 must finish before b2 starts + const b1_end = log.indexOf('b1_end') + const b2_start = log.indexOf('b2_start') + assert.ok(b1_end < b2_start, `bus_b (bus-serial): b1 should finish before b2 starts. Got: [${log.join(', ')}]`) +}) + +// ============================================================================= +// Event-level concurrency on the forward bus. +// +// When the forward bus (bus_b) has bus-serial event concurrency and is already +// processing an event, a queue-jumped child should WAIT for bus_b's in-flight +// event to finish. The current code bypasses event semaphores for ALL buses, +// causing the child to cut in front of the in-flight event. +// +// The fix should only bypass event semaphores on the INITIATING bus (where the +// parent event holds the semaphore). On other buses, bypass only if they resolve +// to the SAME semaphore instance (global-serial shares one global semaphore). 
+// ============================================================================= + +test('BUG: queue-jump should respect bus-serial event concurrency on forward bus', async () => { + const TriggerEvent = BaseEvent.extend('QJEvt_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJEvt_Child', {}) + const SlowEvent = BaseEvent.extend('QJEvt_Slow', {}) + + const bus_a = new EventBus('QJEvt_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', + }) + const bus_b = new EventBus('QJEvt_B', { + event_concurrency: 'bus-serial', // only one event at a time on bus_b + event_handler_concurrency: 'bus-serial', + }) + + const log: string[] = [] + + // SlowEvent handler: occupies bus_b's event semaphore for 40ms + bus_b.on(SlowEvent, async () => { + log.push('slow_start') + await delay(40) + log.push('slow_end') + }) + + // ChildEvent handler on bus_b: should only run after SlowEvent finishes + bus_b.on(ChildEvent, async () => { + log.push('child_b_start') + await delay(5) + log.push('child_b_end') + }) + + // ChildEvent handler on bus_a (so bus_a also processes the child) + bus_a.on(ChildEvent, async () => { + log.push('child_a_start') + await delay(5) + log.push('child_a_end') + }) + + // TriggerEvent handler: dispatches child to both buses, awaits completion + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.dispatch(child) + await child.done() + }) + + // Step 1: Start a slow event on bus_b so it's busy + bus_b.dispatch(SlowEvent({ event_timeout: null })) + await delay(5) // let slow_handler start + + // Step 2: Trigger the queue-jump on bus_a + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + // The child on bus_b should start AFTER the slow event finishes, + // because bus_b has bus-serial event concurrency. 
+ const slow_end = log.indexOf('slow_end') + const child_b_start = log.indexOf('child_b_start') + assert.ok(slow_end >= 0, 'slow event should have completed') + assert.ok(child_b_start >= 0, 'child on bus_b should have run') + assert.ok( + slow_end < child_b_start, + `bus_b (bus-serial events): child should wait for slow event to finish. ` + `Got: [${log.join(', ')}]` + ) + + // The child on bus_a should have processed (queue-jumped, bypasses bus_a's event semaphore) + assert.ok(log.includes('child_a_start'), 'child on bus_a should have run') + assert.ok(log.includes('child_a_end'), 'child on bus_a should have completed') +}) + +test('queue-jump with fully-parallel forward bus starts immediately', async () => { + // When bus_b uses parallel event AND handler concurrency, the queue-jumped + // child should start immediately even while another event's handler is running. + + const TriggerEvent = BaseEvent.extend('QJFullPar_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJFullPar_Child', {}) + const SlowEvent = BaseEvent.extend('QJFullPar_Slow', {}) + + const bus_a = new EventBus('QJFullPar_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', + }) + const bus_b = new EventBus('QJFullPar_B', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + + const log: string[] = [] + + bus_b.on(SlowEvent, async () => { + log.push('slow_start') + await delay(40) + log.push('slow_end') + }) + + bus_b.on(ChildEvent, async () => { + log.push('child_b_start') + await delay(5) + log.push('child_b_end') + }) + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! 
+ bus_b.dispatch(child) + await child.done() + }) + + bus_b.dispatch(SlowEvent({ event_timeout: null })) + await delay(5) + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + const slow_end = log.indexOf('slow_end') + const child_b_start = log.indexOf('child_b_start') + assert.ok(child_b_start >= 0, 'child on bus_b should have run') + assert.ok(child_b_start < slow_end, `bus_b (fully parallel): child should start before slow finishes. ` + `Got: [${log.join(', ')}]`) +}) + +test('queue-jump with parallel events but bus-serial handlers on forward bus serializes handlers', async () => { + // When bus_b has parallel event concurrency but bus-serial handler concurrency, + // the child event can start processing immediately (event semaphore is parallel), + // but its handler must wait for the slow handler to release the handler semaphore. + + const TriggerEvent = BaseEvent.extend('QJEvtParHSer_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJEvtParHSer_Child', {}) + const SlowEvent = BaseEvent.extend('QJEvtParHSer_Slow', {}) + + const bus_a = new EventBus('QJEvtParHSer_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', + }) + const bus_b = new EventBus('QJEvtParHSer_B', { + event_concurrency: 'parallel', // events can start concurrently + event_handler_concurrency: 'bus-serial', // but handlers serialize + }) + + const log: string[] = [] + + bus_b.on(SlowEvent, async () => { + log.push('slow_start') + await delay(40) + log.push('slow_end') + }) + + bus_b.on(ChildEvent, async () => { + log.push('child_b_start') + await delay(5) + log.push('child_b_end') + }) + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! 
+ bus_b.dispatch(child) + await child.done() + }) + + bus_b.dispatch(SlowEvent({ event_timeout: null })) + await delay(5) + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + // With bus-serial handler concurrency, child handler must wait for slow handler + const slow_end = log.indexOf('slow_end') + const child_b_start = log.indexOf('child_b_start') + assert.ok(child_b_start >= 0, 'child on bus_b should have run') + assert.ok( + child_b_start > slow_end, + `bus_b (bus-serial handlers): child handler should wait for slow handler. ` + `Got: [${log.join(', ')}]` + ) +}) diff --git a/bubus-ts/tests/context_propagation.test.ts b/bubus-ts/tests/context_propagation.test.ts new file mode 100644 index 0000000..a597aea --- /dev/null +++ b/bubus-ts/tests/context_propagation.test.ts @@ -0,0 +1,307 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus } from '../src/index.js' +import { async_local_storage, hasAsyncLocalStorage } from '../src/async_context.js' + +type ContextStore = { + request_id?: string + user_id?: string + trace_id?: string +} + +const SimpleEvent = BaseEvent.extend('SimpleEvent', {}) +const ChildEvent = BaseEvent.extend('ChildEvent', {}) + +const skip_if_no_async_local_storage = !hasAsyncLocalStorage() + +const require_async_local_storage = () => { + assert.ok(async_local_storage, 'AsyncLocalStorage not available') + return async_local_storage +} + +const get_store = (store: ContextStore | undefined | null): ContextStore => store ?? 
{} + +test('context propagates to handler', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('ContextTestBus') + const captured_values: ContextStore = {} + const storage = require_async_local_storage() + + bus.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_values.request_id = store?.request_id + captured_values.user_id = store?.user_id + }) + + await storage.run({ request_id: 'req-12345', user_id: 'user-abc' }, async () => { + const event = bus.dispatch(SimpleEvent({})) + await event.done() + }) + + assert.equal(captured_values.request_id, 'req-12345') + assert.equal(captured_values.user_id, 'user-abc') +}) + +test('context propagates through nested handlers', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('NestedContextBus') + const captured_parent: ContextStore = {} + const captured_child: ContextStore = {} + const storage = require_async_local_storage() + + bus.on(SimpleEvent, async (event) => { + const store = storage.getStore() as ContextStore | undefined + captured_parent.request_id = store?.request_id + captured_parent.trace_id = store?.trace_id + + const child = event.bus?.dispatch(ChildEvent({})) + if (child) { + await child.done() + } + }) + + bus.on(ChildEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_child.request_id = store?.request_id + captured_child.trace_id = store?.trace_id + }) + + await storage.run({ request_id: 'req-nested-123', trace_id: 'trace-xyz' }, async () => { + const event = bus.dispatch(SimpleEvent({})) + await event.done() + }) + + assert.equal(captured_parent.request_id, 'req-nested-123') + assert.equal(captured_parent.trace_id, 'trace-xyz') + assert.equal(captured_child.request_id, 'req-nested-123') + assert.equal(captured_child.trace_id, 'trace-xyz') +}) + +test('context isolation between dispatches', { skip: skip_if_no_async_local_storage }, async () => { + const bus = 
new EventBus('IsolationTestBus') + const captured_values: string[] = [] + const storage = require_async_local_storage() + + bus.on(SimpleEvent, async () => { + const store = storage.getStore() as ContextStore | undefined + captured_values.push(store?.request_id ?? '') + }) + + const event_a = storage.run({ request_id: 'req-A' }, () => bus.dispatch(SimpleEvent({}))) + const event_b = storage.run({ request_id: 'req-B' }, () => bus.dispatch(SimpleEvent({}))) + + await event_a.done() + await event_b.done() + + assert.ok(captured_values.includes('req-A')) + assert.ok(captured_values.includes('req-B')) +}) + +test('context propagates to multiple handlers', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('ParallelContextBus') + const captured_values: string[] = [] + const storage = require_async_local_storage() + + bus.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_values.push(`h1:${store?.request_id ?? ''}`) + }) + + bus.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_values.push(`h2:${store?.request_id ?? 
''}`) + }) + + await storage.run({ request_id: 'req-parallel' }, async () => { + const event = bus.dispatch(SimpleEvent({})) + await event.done() + }) + + assert.ok(captured_values.includes('h1:req-parallel')) + assert.ok(captured_values.includes('h2:req-parallel')) +}) + +test('context propagates through event forwarding', { skip: skip_if_no_async_local_storage }, async () => { + const bus_a = new EventBus('BusA') + const bus_b = new EventBus('BusB') + const captured_bus_a: ContextStore = {} + const captured_bus_b: ContextStore = {} + const storage = require_async_local_storage() + + bus_a.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_bus_a.request_id = store?.request_id + }) + + bus_b.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_bus_b.request_id = store?.request_id + }) + + bus_a.on('*', bus_b.dispatch) + + await storage.run({ request_id: 'req-forwarded' }, async () => { + const event = bus_a.dispatch(SimpleEvent({})) + await event.done() + await bus_b.waitUntilIdle() + }) + + assert.equal(captured_bus_a.request_id, 'req-forwarded') + assert.equal(captured_bus_b.request_id, 'req-forwarded') +}) + +test('handler can modify context without affecting parent', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('ModifyContextBus') + const storage = require_async_local_storage() + let parent_value_after_child = '' + + bus.on(SimpleEvent, async (event) => { + if (!storage.enterWith) { + throw new Error('AsyncLocalStorage.enterWith is required for this test') + } + storage.enterWith({ request_id: 'parent-value' }) + const child = event.bus?.dispatch(ChildEvent({})) + if (child) { + await child.done() + } + const store = get_store(storage.getStore() as ContextStore | undefined) + parent_value_after_child = store.request_id ?? 
'' + }) + + bus.on(ChildEvent, () => { + if (!storage.enterWith) { + throw new Error('AsyncLocalStorage.enterWith is required for this test') + } + storage.enterWith({ request_id: 'child-modified' }) + }) + + await storage.run({}, async () => { + const event = bus.dispatch(SimpleEvent({})) + await event.done() + }) + + assert.equal(parent_value_after_child, 'parent-value') +}) + +test('event parent_id tracking still works with context propagation', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('ParentIdTrackingBus') + const storage = require_async_local_storage() + let parent_event_id: string | undefined + let child_event_parent_id: string | undefined + + bus.on(SimpleEvent, async (event) => { + parent_event_id = event.event_id + const child = event.bus?.dispatch(ChildEvent({})) + if (child) { + await child.done() + } + }) + + bus.on(ChildEvent, (event) => { + child_event_parent_id = event.event_parent_id + }) + + await storage.run({ request_id: 'req-parent-tracking' }, async () => { + const event = bus.dispatch(SimpleEvent({})) + await event.done() + }) + + assert.ok(parent_event_id) + assert.ok(child_event_parent_id) + assert.equal(child_event_parent_id, parent_event_id) +}) + +test('dispatch context and parent_id both work together', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('CombinedContextBus') + const storage = require_async_local_storage() + const results: Record = {} + + bus.on(SimpleEvent, async (event) => { + const store = storage.getStore() as ContextStore | undefined + results.parent_request_id = store?.request_id + results.parent_event_id = event.event_id + const child = event.bus?.dispatch(ChildEvent({})) + if (child) { + await child.done() + } + }) + + bus.on(ChildEvent, (event) => { + const store = storage.getStore() as ContextStore | undefined + results.child_request_id = store?.request_id + results.child_event_parent_id = event.event_parent_id + }) + + await 
storage.run({ request_id: 'req-combined-test' }, async () => { + const event = bus.dispatch(SimpleEvent({})) + await event.done() + }) + + assert.equal(results.parent_request_id, 'req-combined-test') + assert.equal(results.child_request_id, 'req-combined-test') + assert.equal(results.child_event_parent_id, results.parent_event_id) +}) + +test('deeply nested context and parent tracking', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('DeepNestingBus') + const storage = require_async_local_storage() + const results: Array<{ + level: number + request_id?: string + event_id: string + parent_id?: string + }> = [] + + const Level2Event = BaseEvent.extend('Level2Event', {}) + const Level3Event = BaseEvent.extend('Level3Event', {}) + + bus.on(SimpleEvent, async (event) => { + const store = storage.getStore() as ContextStore | undefined + results.push({ + level: 1, + request_id: store?.request_id, + event_id: event.event_id, + parent_id: event.event_parent_id, + }) + const child = event.bus?.dispatch(Level2Event({})) + if (child) { + await child.done() + } + }) + + bus.on(Level2Event, async (event) => { + const store = storage.getStore() as ContextStore | undefined + results.push({ + level: 2, + request_id: store?.request_id, + event_id: event.event_id, + parent_id: event.event_parent_id, + }) + const child = event.bus?.dispatch(Level3Event({})) + if (child) { + await child.done() + } + }) + + bus.on(Level3Event, (event) => { + const store = storage.getStore() as ContextStore | undefined + results.push({ + level: 3, + request_id: store?.request_id, + event_id: event.event_id, + parent_id: event.event_parent_id, + }) + }) + + await storage.run({ request_id: 'req-deep-nesting' }, async () => { + const event = bus.dispatch(SimpleEvent({})) + await event.done() + }) + + assert.equal(results.length, 3) + for (const result of results) { + assert.equal(result.request_id, 'req-deep-nesting') + } + assert.equal(results[0].parent_id, undefined) + 
assert.equal(results[1].parent_id, results[0].event_id) + assert.equal(results[2].parent_id, results[1].event_id) +}) diff --git a/bubus-ts/tests/debounce.test.ts b/bubus-ts/tests/debounce.test.ts new file mode 100644 index 0000000..54bd49f --- /dev/null +++ b/bubus-ts/tests/debounce.test.ts @@ -0,0 +1,112 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const ParentEvent = BaseEvent.extend('ParentEvent', {}) + +const ScreenshotEvent = BaseEvent.extend('ScreenshotEvent', { target_id: z.string() }) + +const SyncEvent = BaseEvent.extend('SyncEvent', {}) + +test('simple debounce uses recent history or dispatches new', async () => { + const bus = new EventBus('DebounceBus') + + const parent_event = bus.dispatch(ParentEvent({})) + await parent_event.done() + + const child_event = parent_event.bus?.emit(ScreenshotEvent({ target_id: 'tab-1' })) + assert.ok(child_event) + await child_event.done() + + const reused_event = + (await bus.find(ScreenshotEvent, { + past: 10, + future: false, + child_of: parent_event, + })) ?? (await bus.dispatch(ScreenshotEvent({ target_id: 'fallback' })).done()) + + assert.equal(reused_event.event_id, child_event.event_id) + assert.equal(reused_event.event_parent_id, parent_event.event_id) +}) + +test('advanced debounce prefers history, then waits for future, then dispatches', async () => { + const bus = new EventBus('AdvancedDebounceBus') + + const pending_event = bus.find(SyncEvent, { past: false, future: 0.5 }) + + setTimeout(() => { + bus.dispatch(SyncEvent({})) + }, 50) + + const resolved_event = + (await bus.find(SyncEvent, { past: true, future: false })) ?? (await pending_event) ?? 
(await bus.dispatch(SyncEvent({})).done()) + + assert.ok(resolved_event) + assert.equal(resolved_event.event_type, 'SyncEvent') +}) + +test('debounce returns existing fresh event', async () => { + const bus = new EventBus('DebounceFreshBus') + + const original = await bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })).done() + + const is_fresh = (event: typeof original): boolean => { + const completed_at = event.event_completed_at ? Date.parse(event.event_completed_at) : 0 + return Date.now() - completed_at < 5000 + } + + const result = + (await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab1' && is_fresh(event), { past: true, future: false })) ?? + (await bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })).done()) + + assert.equal(result.event_id, original.event_id) +}) + +test('debounce dispatches new when no match', async () => { + const bus = new EventBus('DebounceNoMatchBus') + + const result = + (await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab1', { past: true, future: false })) ?? + (await bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })).done()) + + assert.ok(result) + assert.equal(result.target_id, 'tab1') + assert.equal(result.event_status, 'completed') +}) + +test('debounce dispatches new when existing is stale', async () => { + const bus = new EventBus('DebounceStaleBus') + + await bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })).done() + + const result = + (await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab1' && false, { past: true, future: false })) ?? 
+ (await bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })).done()) + + assert.ok(result) + const screenshots = Array.from(bus.event_history.values()).filter((event) => event.event_type === 'ScreenshotEvent') + assert.equal(screenshots.length, 2) +}) + +test('debounce or-chain handles sequential lookups without blocking', async () => { + const bus = new EventBus('DebounceSequentialBus') + + const result1 = + (await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab1', { past: true, future: false })) ?? + (await bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })).done()) + + const result2 = + (await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab1', { past: true, future: false })) ?? + (await bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })).done()) + + const result3 = + (await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab2', { past: true, future: false })) ?? + (await bus.dispatch(ScreenshotEvent({ target_id: 'tab2' })).done()) + + assert.equal(result1.event_id, result2.event_id) + assert.notEqual(result1.event_id, result3.event_id) + assert.equal(result3.target_id, 'tab2') +}) diff --git a/bubus-ts/tests/error_handling.test.ts b/bubus-ts/tests/error_handling.test.ts new file mode 100644 index 0000000..a3ca425 --- /dev/null +++ b/bubus-ts/tests/error_handling.test.ts @@ -0,0 +1,221 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus } from '../src/index.js' + +const TestEvent = BaseEvent.extend('TestEvent', {}) + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +test('handler error is captured and does not prevent other handlers from running', async () => { + const bus = new EventBus('ErrorIsolationBus') + const results: string[] = [] + + const failing_handler = (): string => { + throw new Error('Expected to fail - testing error handling') + } + + const working_handler = (): string => { + 
results.push('success') + return 'worked' + } + + bus.on(TestEvent, failing_handler) + bus.on(TestEvent, working_handler) + + const event = bus.dispatch(TestEvent({})) + await event.done() + + // Both handlers should have run and produced results + assert.equal(event.event_results.size, 2) + + const failing_result = Array.from(event.event_results.values()).find((r) => r.handler_name === 'failing_handler') + assert.ok(failing_result, 'failing_handler result should exist') + assert.equal(failing_result.status, 'error') + assert.ok(failing_result.error instanceof Error) + assert.ok((failing_result.error as Error).message.includes('Expected to fail'), 'error message should contain the thrown message') + + const working_result = Array.from(event.event_results.values()).find((r) => r.handler_name === 'working_handler') + assert.ok(working_result, 'working_handler result should exist') + assert.equal(working_result.status, 'completed') + assert.equal(working_result.result, 'worked') + + // The working handler actually ran + assert.deepEqual(results, ['success']) +}) + +test('event.event_errors collects handler errors', async () => { + const bus = new EventBus('ErrorCollectionBus') + + const handler_a = (): void => { + throw new Error('error_a') + } + + const handler_b = (): void => { + throw new TypeError('error_b') + } + + const handler_c = (): string => { + return 'ok' + } + + bus.on(TestEvent, handler_a) + bus.on(TestEvent, handler_b) + bus.on(TestEvent, handler_c) + + const event = bus.dispatch(TestEvent({})) + await event.done() + + // Two errors should be collected + assert.equal(event.event_errors.length, 2) + const error_messages = event.event_errors.map((e) => (e as Error).message) + assert.ok(error_messages.includes('error_a')) + assert.ok(error_messages.includes('error_b')) +}) + +test('handler error does not prevent event completion', async () => { + const bus = new EventBus('ErrorCompletionBus') + + bus.on(TestEvent, () => { + throw new Error('handler 
failed') + }) + + const event = bus.dispatch(TestEvent({})) + await event.done() + + // Event should still complete even though handler errored + assert.equal(event.event_status, 'completed') + assert.ok(event.event_completed_at, 'event_completed_at should be set') + assert.equal(event.event_errors.length, 1) +}) + +test('error in one event does not affect subsequent queued events', async () => { + const bus = new EventBus('ErrorQueueBus') + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + + bus.on(Event1, () => { + throw new Error('event1 handler failed') + }) + + bus.on(Event2, () => { + return 'event2 ok' + }) + + const event_1 = bus.dispatch(Event1({})) + const event_2 = bus.dispatch(Event2({})) + + await bus.waitUntilIdle() + + // Event1 completed with error + assert.equal(event_1.event_status, 'completed') + assert.equal(event_1.event_errors.length, 1) + + // Event2 completed successfully and was not affected by Event1's error + assert.equal(event_2.event_status, 'completed') + assert.equal(event_2.event_errors.length, 0) + const result = Array.from(event_2.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'event2 ok') +}) + +test('async handler rejection is captured as error', async () => { + const bus = new EventBus('AsyncErrorBus') + + const async_failing_handler = async (): Promise => { + await delay(1) + throw new Error('async rejection') + } + + bus.on(TestEvent, async_failing_handler) + + const event = bus.dispatch(TestEvent({})) + await event.done() + + assert.equal(event.event_status, 'completed') + assert.equal(event.event_errors.length, 1) + assert.ok((event.event_errors[0] as Error).message.includes('async rejection')) + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') +}) + +test('error in forwarded event handler does not block source bus', async () => { + const bus_a = new EventBus('ErrorForwardA') + 
const bus_b = new EventBus('ErrorForwardB') + + const ForwardEvent = BaseEvent.extend('ForwardEvent', {}) + + // Forward from A to B + bus_a.on('*', bus_b.dispatch) + + // Handler on bus_b throws + bus_b.on(ForwardEvent, () => { + throw new Error('bus_b handler failed') + }) + + // Handler on bus_a succeeds + bus_a.on(ForwardEvent, () => { + return 'bus_a ok' + }) + + const event = bus_a.dispatch(ForwardEvent({})) + await event.done() + + assert.equal(event.event_status, 'completed') + + // bus_a's handler succeeded + const bus_a_result = Array.from(event.event_results.values()).find( + (r) => r.eventbus_name === 'ErrorForwardA' && r.handler_name !== 'dispatch' + ) + assert.ok(bus_a_result) + assert.equal(bus_a_result.status, 'completed') + assert.equal(bus_a_result.result, 'bus_a ok') + + // bus_b's handler errored + const bus_b_result = Array.from(event.event_results.values()).find( + (r) => r.eventbus_name === 'ErrorForwardB' && r.handler_name !== 'dispatch' + ) + assert.ok(bus_b_result) + assert.equal(bus_b_result.status, 'error') + + // Both errors tracked + assert.ok(event.event_errors.length >= 1) +}) + +test('event with no handlers completes without errors', async () => { + const bus = new EventBus('NoHandlerBus') + const OrphanEvent = BaseEvent.extend('OrphanEvent', {}) + + const event = bus.dispatch(OrphanEvent({})) + await event.done() + + assert.equal(event.event_status, 'completed') + assert.equal(event.event_results.size, 0) + assert.equal(event.event_errors.length, 0) +}) + +test('error handler result fields are populated correctly', async () => { + const bus = new EventBus('ErrorFieldsBus') + + const my_handler = (): void => { + throw new RangeError('out of range') + } + + bus.on(TestEvent, my_handler) + + const event = bus.dispatch(TestEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.equal(result.handler_name, 'my_handler') + 
assert.equal(result.eventbus_name, 'ErrorFieldsBus') + assert.ok(result.error instanceof RangeError) + assert.equal((result.error as RangeError).message, 'out of range') + assert.ok(result.started_at, 'started_at should be set') + assert.ok(result.completed_at, 'completed_at should be set even on error') +}) diff --git a/bubus-ts/tests/event_bus_proxy.test.ts b/bubus-ts/tests/event_bus_proxy.test.ts new file mode 100644 index 0000000..0a910ad --- /dev/null +++ b/bubus-ts/tests/event_bus_proxy.test.ts @@ -0,0 +1,232 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus } from '../src/index.js' + +const MainEvent = BaseEvent.extend('MainEvent', {}) +const ChildEvent = BaseEvent.extend('ChildEvent', {}) +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', {}) + +test('event.bus inside handler returns the dispatching bus', async () => { + const bus = new EventBus('TestBus') + + let handler_called = false + let handler_bus_name: string | undefined + let child_event: BaseEvent | undefined + + bus.on(MainEvent, (event) => { + handler_called = true + handler_bus_name = event.bus?.name + + // Should be able to dispatch child events using event.bus + child_event = event.bus?.emit(ChildEvent({})) + }) + + bus.on(ChildEvent, () => {}) + + bus.dispatch(MainEvent({})) + await bus.waitUntilIdle() + + assert.equal(handler_called, true) + assert.equal(handler_bus_name, 'TestBus') + assert.ok(child_event, 'child event should have been dispatched via event.bus') + assert.equal(child_event!.event_type, 'ChildEvent') +}) + +test('event.bus returns correct bus when multiple buses exist', async () => { + const bus1 = new EventBus('Bus1') + const bus2 = new EventBus('Bus2') + + let handler1_bus_name: string | undefined + let handler2_bus_name: string | undefined + + bus1.on(MainEvent, (event) => { + handler1_bus_name = event.bus?.name + }) + + bus2.on(MainEvent, (event) => { + handler2_bus_name = event.bus?.name + }) + + 
bus1.dispatch(MainEvent({})) + await bus1.waitUntilIdle() + + bus2.dispatch(MainEvent({})) + await bus2.waitUntilIdle() + + assert.equal(handler1_bus_name, 'Bus1') + assert.equal(handler2_bus_name, 'Bus2') +}) + +test('event.bus reflects the currently-processing bus when forwarded', async () => { + const bus1 = new EventBus('Bus1') + const bus2 = new EventBus('Bus2') + + // Forward all events from bus1 to bus2 + bus1.on('*', bus2.dispatch) + + let bus2_handler_bus_name: string | undefined + + bus2.on(MainEvent, (event) => { + bus2_handler_bus_name = event.bus?.name + }) + + const event = bus1.dispatch(MainEvent({})) + await bus1.waitUntilIdle() + await bus2.waitUntilIdle() + + // The handler on bus2 should see bus2 as event.bus, not bus1 + assert.equal(bus2_handler_bus_name, 'Bus2') + assert.deepEqual(event.event_path, ['Bus1', 'Bus2']) +}) + +test('event.bus in nested handlers sees the same bus', async () => { + const bus = new EventBus('MainBus') + + let outer_bus_name: string | undefined + let inner_bus_name: string | undefined + + bus.on(MainEvent, async (event) => { + outer_bus_name = event.bus?.name + + // Dispatch child using event.bus + const child = event.bus!.emit(ChildEvent({})) + await child.done() + }) + + bus.on(ChildEvent, (event) => { + inner_bus_name = event.bus?.name + }) + + const parent = bus.dispatch(MainEvent({})) + await parent.done() + + assert.equal(outer_bus_name, 'MainBus') + assert.equal(inner_bus_name, 'MainBus') +}) + +test('event.bus.dispatch sets parent-child relationships through 3 levels', async () => { + const bus = new EventBus('MainBus') + + const execution_order: string[] = [] + let child_ref: BaseEvent | undefined + let grandchild_ref: BaseEvent | undefined + + bus.on(MainEvent, async (event) => { + execution_order.push('parent_start') + assert.equal(event.bus?.name, 'MainBus') + + child_ref = event.bus!.emit(ChildEvent({})) + await child_ref.done() + + execution_order.push('parent_end') + }) + + bus.on(ChildEvent, async 
(event) => { + execution_order.push('child_start') + assert.equal(event.bus?.name, 'MainBus') + + grandchild_ref = event.bus!.emit(GrandchildEvent({})) + await grandchild_ref.done() + + execution_order.push('child_end') + }) + + bus.on(GrandchildEvent, (event) => { + execution_order.push('grandchild_start') + assert.equal(event.bus?.name, 'MainBus') + execution_order.push('grandchild_end') + }) + + const parent_event = bus.dispatch(MainEvent({})) + await parent_event.done() + + // Child events should queue-jump and complete before their parents return + assert.deepEqual(execution_order, ['parent_start', 'child_start', 'grandchild_start', 'grandchild_end', 'child_end', 'parent_end']) + + // All events completed + assert.equal(parent_event.event_status, 'completed') + assert.ok(child_ref) + assert.equal(child_ref!.event_status, 'completed') + assert.ok(grandchild_ref) + assert.equal(grandchild_ref!.event_status, 'completed') + + // Parent-child relationships are set correctly + assert.equal(child_ref!.event_parent_id, parent_event.event_id) + assert.equal(grandchild_ref!.event_parent_id, child_ref!.event_id) + assert.equal(child_ref!.event_parent?.event_id, parent_event.event_id) + assert.equal(grandchild_ref!.event_parent?.event_id, child_ref!.event_id) +}) + +test('event.bus with forwarding: child dispatched via event.bus goes to the correct bus', async () => { + const bus1 = new EventBus('Bus1') + const bus2 = new EventBus('Bus2') + + // Forward all events from bus1 to bus2 + bus1.on('*', bus2.dispatch) + + let child_handler_bus_name: string | undefined + + // Handlers only on bus2 + bus2.on(MainEvent, async (event) => { + // Handler runs on bus2 (forwarded from bus1) + assert.equal(event.bus?.name, 'Bus2') + + // Child dispatched via event.bus should go to bus2 + const child = event.bus!.emit(ChildEvent({})) + await child.done() + }) + + bus2.on(ChildEvent, (event) => { + child_handler_bus_name = event.bus?.name + }) + + bus1.dispatch(MainEvent({})) + await 
bus1.waitUntilIdle() + await bus2.waitUntilIdle() + + // Child handler should have seen bus2 + assert.equal(child_handler_bus_name, 'Bus2') +}) + +test('event.bus is set on the event after dispatch (outside handler)', async () => { + const bus = new EventBus('TestBus') + + // Before dispatch, bus is not set + const raw_event = MainEvent({}) + assert.equal(raw_event.bus, undefined) + + // After dispatch, bus is set on the original event + const dispatched = bus.dispatch(raw_event) + assert.ok(dispatched.bus, 'event.bus should be set after dispatch') + + await bus.waitUntilIdle() +}) + +test('event.bus.dispatch from handler correctly attributes event_emitted_by_handler_id', async () => { + const bus = new EventBus('TestBus') + + bus.on(MainEvent, (event) => { + event.bus?.emit(ChildEvent({})) + }) + + bus.on(ChildEvent, () => {}) + + const parent = bus.dispatch(MainEvent({})) + await bus.waitUntilIdle() + + // Find the child event in history + const child = Array.from(bus.event_history.values()).find((e) => e.event_type === 'ChildEvent') + assert.ok(child, 'child event should be in history') + assert.equal(child!.event_parent_id, parent.event_id) + assert.equal(child!.event_parent?.event_id, parent.event_id) + + // The child should have event_emitted_by_handler_id set to the handler that emitted it + assert.ok(child!.event_emitted_by_handler_id, 'event_emitted_by_handler_id should be set on child events dispatched via event.bus') + + // The handler id should correspond to a handler result on the parent event + const parent_from_history = Array.from(bus.event_history.values()).find((e) => e.event_type === 'MainEvent') + assert.ok(parent_from_history) + const handler_result = parent_from_history!.event_results.get(child!.event_emitted_by_handler_id!) 
+ assert.ok(handler_result, 'handler_id on child should match a handler result on the parent') +}) diff --git a/bubus-ts/tests/event_results.test.ts b/bubus-ts/tests/event_results.test.ts new file mode 100644 index 0000000..cda71ff --- /dev/null +++ b/bubus-ts/tests/event_results.test.ts @@ -0,0 +1,83 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const StringResultEvent = BaseEvent.extend('StringResultEvent', { + event_result_schema: z.string(), + event_result_type: 'string', +}) + +const ObjectResultEvent = BaseEvent.extend('ObjectResultEvent', { + event_result_schema: z.object({ value: z.string(), count: z.number() }), +}) + +const NoResultSchemaEvent = BaseEvent.extend('NoResultSchemaEvent', {}) + +test('event results capture handler return values', async () => { + const bus = new EventBus('ResultCaptureBus') + + bus.on(StringResultEvent, () => 'ok') + + const event = bus.dispatch(StringResultEvent({})) + await event.done() + + assert.equal(event.event_results.size, 1) + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'ok') +}) + +test('event_result_schema validates handler results', async () => { + const bus = new EventBus('ResultSchemaBus') + + bus.on(ObjectResultEvent, () => ({ value: 'hello', count: 2 })) + + const event = bus.dispatch(ObjectResultEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { value: 'hello', count: 2 }) +}) + +test('event_result_schema allows undefined handler return values', async () => { + const bus = new EventBus('ResultSchemaUndefinedBus') + + bus.on(ObjectResultEvent, () => {}) + + const event = bus.dispatch(ObjectResultEvent({})) + await event.done() + + const result = 
Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, undefined) +}) + +test('invalid result marks handler error', async () => { + const bus = new EventBus('ResultSchemaErrorBus') + + bus.on(ObjectResultEvent, () => ({ value: 'bad', count: 'nope' }) as unknown) + + const event = bus.dispatch(ObjectResultEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof Error) +}) + +test('event with no result schema stores raw values', async () => { + const bus = new EventBus('NoSchemaBus') + + bus.on(NoResultSchemaEvent, () => ({ raw: true })) + + const event = bus.dispatch(NoResultSchemaEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { raw: true }) +}) diff --git a/bubus-ts/tests/eventbus_basics.test.ts b/bubus-ts/tests/eventbus_basics.test.ts new file mode 100644 index 0000000..ac3fbcc --- /dev/null +++ b/bubus-ts/tests/eventbus_basics.test.ts @@ -0,0 +1,679 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus } from '../src/index.js' +import { LockManager } from '../src/lock_manager.js' +import { z } from 'zod' + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +// ─── Constructor defaults ──────────────────────────────────────────────────── + +test('EventBus initializes with correct defaults', async () => { + const bus = new EventBus('DefaultsBus') + + assert.equal(bus.name, 'DefaultsBus') + assert.equal(bus.max_history_size, 100) + assert.equal(bus.event_concurrency_default, 'bus-serial') + assert.equal(bus.event_handler_concurrency_default, 'bus-serial') + assert.equal(bus.event_timeout_default, 60) + assert.equal(bus.event_history.size, 0) + 
assert.ok(EventBus._all_instances.has(bus)) + await bus.waitUntilIdle() +}) + +test('EventBus applies custom options', () => { + const bus = new EventBus('CustomBus', { + max_history_size: 500, + event_concurrency: 'parallel', + event_handler_concurrency: 'global-serial', + event_timeout: 30, + }) + + assert.equal(bus.max_history_size, 500) + assert.equal(bus.event_concurrency_default, 'parallel') + assert.equal(bus.event_handler_concurrency_default, 'global-serial') + assert.equal(bus.event_timeout_default, 30) +}) + +test('EventBus with null max_history_size means unlimited', () => { + const bus = new EventBus('UnlimitedBus', { max_history_size: null }) + assert.equal(bus.max_history_size, null) +}) + +test('EventBus with null event_timeout disables timeouts', () => { + const bus = new EventBus('NoTimeoutBus', { event_timeout: null }) + assert.equal(bus.event_timeout_default, null) +}) + +test('EventBus auto-generates name when not provided', () => { + const bus = new EventBus() + assert.equal(bus.name, 'EventBus') +}) + +test('EventBus exposes locks API surface', () => { + const bus = new EventBus('GateSurfaceBus') + const locks = bus.locks as unknown as Record + + assert.equal(typeof locks.requestPause, 'function') + assert.equal(typeof locks.waitUntilRunloopResumed, 'function') + assert.equal(typeof locks.isPaused, 'function') + assert.equal(typeof locks.waitForIdle, 'function') + assert.equal(typeof locks.notifyIdleListeners, 'function') + assert.equal(typeof locks.getSemaphoreForEvent, 'function') + assert.equal(typeof locks.getSemaphoreForHandler, 'function') +}) + +test('EventBus locks methods are callable and preserve semaphore resolution behavior', async () => { + const bus = new EventBus('GateInvocationBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', + }) + const GateEvent = BaseEvent.extend('GateInvocationEvent', {}) + + const release_pause = bus.locks.requestPause() + assert.equal(bus.locks.isPaused(), true) + + 
let resumed = false + const resumed_promise = bus.locks.waitUntilRunloopResumed().then(() => { + resumed = true + }) + await Promise.resolve() + assert.equal(resumed, false) + + release_pause() + await resumed_promise + assert.equal(bus.locks.isPaused(), false) + + const event_with_global = GateEvent({ + event_concurrency: 'global-serial', + event_handler_concurrency: 'global-serial', + }) + assert.equal(bus.locks.getSemaphoreForEvent(event_with_global), LockManager.global_event_semaphore) + assert.equal(bus.locks.getSemaphoreForHandler(event_with_global), LockManager.global_handler_semaphore) + + const event_with_parallel = GateEvent({ + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + assert.equal(bus.locks.getSemaphoreForEvent(event_with_parallel), null) + assert.equal(bus.locks.getSemaphoreForHandler(event_with_parallel), null) + + const event_using_handler_options = GateEvent({}) + assert.equal(bus.locks.getSemaphoreForHandler(event_using_handler_options, { event_handler_concurrency: 'parallel' }), null) + + bus.dispatch(GateEvent({})) + bus.locks.notifyIdleListeners() + await bus.locks.waitForIdle() +}) + +test('BaseEvent lifecycle methods are callable and preserve lifecycle behavior', async () => { + const LifecycleEvent = BaseEvent.extend('LifecycleMethodInvocationEvent', {}) + + const standalone = LifecycleEvent({}) + standalone.markStarted() + assert.equal(standalone.event_status, 'started') + standalone.markCompleted(false) + assert.equal(standalone.event_status, 'completed') + await standalone.waitForCompletion() + + const bus = new EventBus('LifecycleMethodInvocationBus') + const dispatched = bus.dispatch(LifecycleEvent({})) + await dispatched.waitForCompletion() + assert.equal(dispatched.event_status, 'completed') +}) + +test('BaseEvent toJSON/fromJSON roundtrips runtime fields and event_results', async () => { + const RuntimeEvent = BaseEvent.extend('RuntimeSerializationEvent', { + event_result_schema: z.string(), + }) + 
const bus = new EventBus('RuntimeSerializationBus') + + bus.on(RuntimeEvent, () => 'ok') + + const event = bus.dispatch(RuntimeEvent({})) + await event.done() + + const json = event.toJSON() as Record + assert.equal(json.event_status, 'completed') + assert.equal(typeof json.event_created_ts, 'number') + assert.equal(typeof json.event_started_ts, 'number') + assert.equal(typeof json.event_completed_ts, 'number') + assert.equal(json.event_pending_bus_count, 0) + assert.ok(Array.isArray(json.event_results)) + const json_results = json.event_results as Array> + assert.equal(json_results.length, 1) + assert.equal(json_results[0].status, 'completed') + assert.equal(json_results[0].result, 'ok') + assert.equal((json_results[0].handler as Record).id, Array.from(event.event_results.values())[0].handler_id) + + const restored = RuntimeEvent.fromJSON?.(json) ?? RuntimeEvent(json as never) + assert.equal(restored.event_status, 'completed') + assert.equal(restored.event_created_ts, event.event_created_ts) + assert.equal(restored.event_pending_bus_count, 0) + assert.equal(restored.event_results.size, 1) + const restored_result = Array.from(restored.event_results.values())[0] + assert.equal(restored_result.status, 'completed') + assert.equal(restored_result.result, 'ok') +}) + +// ─── Event dispatch and status lifecycle ───────────────────────────────────── + +test('dispatch returns pending event with correct initial state', async () => { + const bus = new EventBus('LifecycleBus', { max_history_size: 100 }) + const TestEvent = BaseEvent.extend('TestEvent', { data: z.string() }) + + const event = bus.dispatch(TestEvent({ data: 'hello' })) + + // Immediate state after dispatch (before any microtask runs) + assert.equal(event.event_type, 'TestEvent') + assert.ok(event.event_id) + assert.ok(event.event_created_at) + assert.equal((event as any).data, 'hello') + + // event_path should include the bus name + const original = event._event_original ?? 
event + assert.ok(original.event_path.includes('LifecycleBus')) + + await bus.waitUntilIdle() +}) + +test('event transitions through pending -> started -> completed', async () => { + const bus = new EventBus('StatusBus', { max_history_size: 100 }) + const TestEvent = BaseEvent.extend('TestEvent', {}) + let status_during_handler: string | undefined + + bus.on(TestEvent, (event: BaseEvent) => { + status_during_handler = event.event_status + return 'done' + }) + + const event = bus.dispatch(TestEvent({})) + const original = event._event_original ?? event + + await event.done() + + assert.equal(status_during_handler, 'started') + assert.equal(original.event_status, 'completed') + assert.ok(original.event_started_at, 'event_started_at should be set') + assert.ok(original.event_completed_at, 'event_completed_at should be set') +}) + +test('event with no handlers completes immediately', async () => { + const bus = new EventBus('NoHandlerBus', { max_history_size: 100 }) + const OrphanEvent = BaseEvent.extend('OrphanEvent', {}) + + const event = bus.dispatch(OrphanEvent({})) + await event.done() + + const original = event._event_original ?? 
event + assert.equal(original.event_status, 'completed') + assert.equal(original.event_results.size, 0) +}) + +// ─── Event history tracking ────────────────────────────────────────────────── + +test('dispatched events appear in event_history', async () => { + const bus = new EventBus('HistoryBus', { max_history_size: 100 }) + const EventA = BaseEvent.extend('EventA', {}) + const EventB = BaseEvent.extend('EventB', {}) + + bus.dispatch(EventA({})) + bus.dispatch(EventB({})) + await bus.waitUntilIdle() + + assert.equal(bus.event_history.size, 2) + const history = Array.from(bus.event_history.values()) + assert.equal(history[0].event_type, 'EventA') + assert.equal(history[1].event_type, 'EventB') + + // All events are accessible by id + for (const event of bus.event_history.values()) { + assert.ok(bus.event_history.has(event.event_id)) + } +}) + +// ─── History trimming (max_history_size) ───────────────────────────────────── + +test('history is trimmed to max_history_size, completed events removed first', async () => { + const bus = new EventBus('TrimBus', { max_history_size: 5 }) + const TrimEvent = BaseEvent.extend('TrimEvent', { seq: z.number() }) + + bus.on(TrimEvent, () => 'ok') + + // Dispatch 10 events; they'll process and complete in FIFO order + for (let i = 0; i < 10; i++) { + bus.dispatch(TrimEvent({ seq: i })) + } + await bus.waitUntilIdle() + + // History should be trimmed to at most max_history_size + assert.ok(bus.event_history.size <= 5, `expected <= 5, got ${bus.event_history.size}`) + + // The remaining events should be the MOST RECENT ones (oldest completed removed first) + const seqs = Array.from(bus.event_history.values()).map((e) => (e as any).seq as number) + for (let i = 1; i < seqs.length; i++) { + assert.ok(seqs[i] > seqs[i - 1], 'remaining history should be in order') + } +}) + +test('unlimited history (max_history_size: null) keeps all events', async () => { + const bus = new EventBus('UnlimitedHistBus', { max_history_size: null }) + 
const PingEvent = BaseEvent.extend('PingEvent', {}) + + bus.on(PingEvent, () => 'pong') + + for (let i = 0; i < 150; i++) { + bus.dispatch(PingEvent({})) + } + await bus.waitUntilIdle() + + assert.equal(bus.event_history.size, 150) + + // All completed + for (const event of bus.event_history.values()) { + assert.equal(event.event_status, 'completed') + } +}) + +// ─── Event type derivation ─────────────────────────────────────────────────── + +test('event_type is derived from extend() name argument', () => { + const MyCustomEvent = BaseEvent.extend('MyCustomEvent', { val: z.number() }) + const event = MyCustomEvent({ val: 42 }) + assert.equal(event.event_type, 'MyCustomEvent') +}) + +test('event_type can be overridden at instantiation', () => { + const FlexEvent = BaseEvent.extend('FlexEvent', {}) + const event = FlexEvent({ event_type: 'OverriddenType' }) + assert.equal(event.event_type, 'OverriddenType') +}) + +test('handler registration by string matches extend() name', async () => { + const bus = new EventBus('StringMatchBus', { max_history_size: 100 }) + const NamedEvent = BaseEvent.extend('NamedEvent', {}) + const received: string[] = [] + + bus.on('NamedEvent', () => { + received.push('string_handler') + }) + + bus.dispatch(NamedEvent({})) + await bus.waitUntilIdle() + + assert.equal(received.length, 1) + assert.equal(received[0], 'string_handler') +}) + +test('wildcard handler receives all events', async () => { + const bus = new EventBus('WildcardBus', { max_history_size: 100 }) + const EventA = BaseEvent.extend('EventA', {}) + const EventB = BaseEvent.extend('EventB', {}) + const types: string[] = [] + + bus.on('*', (event: BaseEvent) => { + types.push(event.event_type) + }) + + bus.dispatch(EventA({})) + bus.dispatch(EventB({})) + await bus.waitUntilIdle() + + assert.deepEqual(types, ['EventA', 'EventB']) +}) + +// ─── Error handling and isolation ──────────────────────────────────────────── + +test('handler error is captured without crashing the bus', 
async () => { + const bus = new EventBus('ErrorBus', { max_history_size: 100 }) + const ErrorEvent = BaseEvent.extend('ErrorEvent', {}) + + bus.on(ErrorEvent, () => { + throw new Error('handler blew up') + }) + + const event = bus.dispatch(ErrorEvent({})) + await event.done() + + const original = event._event_original ?? event + assert.equal(original.event_status, 'completed') + assert.ok(original.event_errors.length > 0, 'event should record the error') + + // The handler result should have error status + const results = Array.from(original.event_results.values()) + assert.equal(results.length, 1) + assert.equal(results[0].status, 'error') + assert.ok(results[0].error instanceof Error) + assert.equal((results[0].error as Error).message, 'handler blew up') +}) + +test('one handler error does not prevent other handlers from running', async () => { + const bus = new EventBus('IsolationBus', { + max_history_size: 100, + event_handler_concurrency: 'parallel', + }) + const MultiEvent = BaseEvent.extend('MultiEvent', {}) + + const results_seen: string[] = [] + + bus.on(MultiEvent, () => { + results_seen.push('handler_1_ok') + return 'result_1' + }) + bus.on(MultiEvent, () => { + throw new Error('handler_2_fails') + }) + bus.on(MultiEvent, () => { + results_seen.push('handler_3_ok') + return 'result_3' + }) + + const event = bus.dispatch(MultiEvent({})) + await event.done() + + const original = event._event_original ?? 
event + assert.equal(original.event_status, 'completed') + + // Both non-erroring handlers should have run + assert.ok(results_seen.includes('handler_1_ok')) + assert.ok(results_seen.includes('handler_3_ok')) + + // Check individual results + const all_results = Array.from(original.event_results.values()) + const completed_results = all_results.filter((r) => r.status === 'completed') + const error_results = all_results.filter((r) => r.status === 'error') + assert.equal(completed_results.length, 2) + assert.equal(error_results.length, 1) +}) + +// ─── Concurrent dispatch ───────────────────────────────────────────────────── + +test('many events dispatched concurrently all complete', async () => { + const bus = new EventBus('ConcurrentBus', { max_history_size: null }) + const BatchEvent = BaseEvent.extend('BatchEvent', { idx: z.number() }) + let processed = 0 + + bus.on(BatchEvent, () => { + processed += 1 + return 'ok' + }) + + const events: BaseEvent[] = [] + for (let i = 0; i < 100; i++) { + events.push(bus.dispatch(BatchEvent({ idx: i }))) + } + + // Wait for all to complete + await Promise.all(events.map((e) => e.done())) + await bus.waitUntilIdle() + + assert.equal(processed, 100) + assert.equal(bus.event_history.size, 100) + + for (const event of bus.event_history.values()) { + assert.equal(event.event_status, 'completed') + } +}) + +// ─── event_timeout default application ─────────────────────────────────────── + +test('dispatch applies bus event_timeout_default when event has null timeout', async () => { + const bus = new EventBus('TimeoutDefaultBus', { + max_history_size: 100, + event_timeout: 42, + }) + const TEvent = BaseEvent.extend('TEvent', {}) + + const event = bus.dispatch(TEvent({})) + const original = event._event_original ?? 
event + + // The bus should have applied its default timeout + assert.equal(original.event_timeout, 42) + + await bus.waitUntilIdle() +}) + +test('event with explicit timeout is not overridden by bus default', async () => { + const bus = new EventBus('TimeoutOverrideBus', { + max_history_size: 100, + event_timeout: 42, + }) + const TEvent = BaseEvent.extend('TEvent', {}) + + const event = bus.dispatch(TEvent({ event_timeout: 10 })) + const original = event._event_original ?? event + + assert.equal(original.event_timeout, 10) + + await bus.waitUntilIdle() +}) + +// ─── EventBus._all_instances tracking ───────────────────────────────────────────── + +test('EventBus._all_instances tracks all created buses', () => { + const initial_count = EventBus._all_instances.size + const bus_a = new EventBus('TrackA') + const bus_b = new EventBus('TrackB') + + assert.ok(EventBus._all_instances.has(bus_a)) + assert.ok(EventBus._all_instances.has(bus_b)) + assert.equal(EventBus._all_instances.size, initial_count + 2) +}) + +// ─── Circular forwarding prevention ────────────────────────────────────────── + +test('circular forwarding does not cause infinite loop', async () => { + const bus_a = new EventBus('CircA', { max_history_size: 100 }) + const bus_b = new EventBus('CircB', { max_history_size: 100 }) + const bus_c = new EventBus('CircC', { max_history_size: 100 }) + + // A -> B -> C -> A (circular) + bus_a.on('*', bus_b.dispatch) + bus_b.on('*', bus_c.dispatch) + bus_c.on('*', bus_a.dispatch) + + const CircEvent = BaseEvent.extend('CircEvent', {}) + const handler_calls: string[] = [] + + // Register real handlers on each bus + bus_a.on(CircEvent, () => { + handler_calls.push('A') + return 'a' + }) + bus_b.on(CircEvent, () => { + handler_calls.push('B') + return 'b' + }) + bus_c.on(CircEvent, () => { + handler_calls.push('C') + return 'c' + }) + + const event = bus_a.dispatch(CircEvent({})) + await event.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + await 
bus_c.waitUntilIdle() + + // Each bus should process the event exactly once (loop prevention via event_path) + assert.equal(handler_calls.filter((h) => h === 'A').length, 1) + assert.equal(handler_calls.filter((h) => h === 'B').length, 1) + assert.equal(handler_calls.filter((h) => h === 'C').length, 1) + + // event_path should contain all three buses + const original = event._event_original ?? event + assert.ok(original.event_path.includes('CircA')) + assert.ok(original.event_path.includes('CircB')) + assert.ok(original.event_path.includes('CircC')) +}) + +// ─── EventBus GC / memory leak ─────────────────────────────────────────────── + +test('unreferenced EventBus can be garbage collected (not retained by _all_instances)', async () => { + // This test requires --expose-gc to force garbage collection + const gc = globalThis.gc as (() => void) | undefined + if (typeof gc !== 'function') { + // Can't test GC without --expose-gc; skip gracefully + return + } + + let weak_ref: WeakRef + + // Create a bus inside an IIFE so the only reference is the WeakRef + ;(() => { + const bus = new EventBus('GCTestBus') + weak_ref = new WeakRef(bus) + })() + + // Force garbage collection + gc() + await delay(50) + gc() + + // If EventBus._all_instances holds a strong reference (Set), + // the bus will NOT be collected β€” proving the memory leak. + // After the fix (WeakRef-based storage), the bus should be collected. 
+ assert.equal( + weak_ref!.deref(), + undefined, + 'bus should be garbage collected when no external references remain β€” ' + + 'EventBus._all_instances is holding a strong reference (memory leak)' + ) +}) + +test('unreferenced buses with event history are garbage collected without destroy()', async () => { + const gc = globalThis.gc as (() => void) | undefined + if (typeof gc !== 'function') { + return + } + + const GcEvent = BaseEvent.extend('GcNoDestroyEvent', {}) + const weak_refs: Array> = [] + + gc() + await delay(20) + gc() + const heap_before = process.memoryUsage().heapUsed + + const create_and_run_bus = async (index: number): Promise> => { + const bus = new EventBus(`GC-NoDestroy-${index}`, { max_history_size: 200 }) + bus.on(GcEvent, () => {}) + for (let i = 0; i < 200; i += 1) { + const event = bus.dispatch(GcEvent({})) + await event.done() + } + await bus.waitUntilIdle() + return new WeakRef(bus) + } + + for (let i = 0; i < 120; i += 1) { + weak_refs.push(await create_and_run_bus(i)) + } + + for (let i = 0; i < 30; i += 1) { + gc() + await delay(20) + } + + const alive_count = weak_refs.reduce((count, ref) => count + (ref.deref() ? 
1 : 0), 0) + const heap_after = process.memoryUsage().heapUsed + + assert.equal(alive_count, 0, 'all unreferenced buses should be garbage collected without explicit destroy()') + assert.equal(EventBus._all_instances.size, 0, '_all_instances should not retain unreferenced buses') + assert.ok( + heap_after <= heap_before + 20 * 1024 * 1024, + `heap should return near baseline after GC, before=${(heap_before / 1024 / 1024).toFixed(1)}MB after=${(heap_after / 1024 / 1024).toFixed(1)}MB` + ) +}) + +// ─── off() handler deregistration ──────────────────────────────────────────── + +test('off() removes a handler so it no longer fires', async () => { + const bus = new EventBus('OffBus', { max_history_size: 100 }) + const OffEvent = BaseEvent.extend('OffEvent', {}) + let call_count = 0 + + const handler = () => { + call_count += 1 + } + + bus.on(OffEvent, handler) + bus.dispatch(OffEvent({})) + await bus.waitUntilIdle() + assert.equal(call_count, 1) + + bus.off(OffEvent, handler) + bus.dispatch(OffEvent({})) + await bus.waitUntilIdle() + assert.equal(call_count, 1, 'handler should not fire after off()') +}) + +test('off() removes a handler by handler_id string', async () => { + const bus = new EventBus('OffByIdBus', { max_history_size: 100 }) + const OffIdEvent = BaseEvent.extend('OffIdEvent', {}) + let call_count = 0 + + bus.on(OffIdEvent, function my_handler() { + call_count += 1 + }) + + // Dispatch once so we can find the handler_id from the event results + const event1 = bus.dispatch(OffIdEvent({})) + await bus.waitUntilIdle() + assert.equal(call_count, 1) + + // Get the handler_id from the event's results + const results = Array.from(event1.event_results.values()) + assert.equal(results.length, 1, 'should have exactly one handler result') + const handler_id = results[0].handler_id + assert.ok(handler_id, 'handler_id should exist') + + // Remove by handler_id string + bus.off(OffIdEvent, handler_id) + + // Dispatch again β€” handler should NOT fire + 
bus.dispatch(OffIdEvent({})) + await bus.waitUntilIdle() + assert.equal(call_count, 1, 'handler should not fire after off() by handler_id') +}) + +test('off() with no handler removes all handlers for that event', async () => { + const bus = new EventBus('OffAllBus', { max_history_size: 100 }) + const OffAllEvent = BaseEvent.extend('OffAllEvent', {}) + const OtherEvent = BaseEvent.extend('OffAllOther', {}) + let call_count_a = 0 + let call_count_b = 0 + let other_count = 0 + + bus.on(OffAllEvent, () => { + call_count_a += 1 + }) + bus.on(OffAllEvent, () => { + call_count_b += 1 + }) + bus.on(OtherEvent, () => { + other_count += 1 + }) + + bus.dispatch(OffAllEvent({})) + await bus.waitUntilIdle() + assert.equal(call_count_a, 1) + assert.equal(call_count_b, 1) + + // Remove ALL handlers for OffAllEvent + bus.off(OffAllEvent) + + bus.dispatch(OffAllEvent({})) + bus.dispatch(OtherEvent({})) + await bus.waitUntilIdle() + + // Neither OffAllEvent handler should fire + assert.equal(call_count_a, 1, 'handler A should not fire after off(event)') + assert.equal(call_count_b, 1, 'handler B should not fire after off(event)') + // OtherEvent handler should still work + assert.equal(other_count, 1, 'unrelated handler should still fire') +}) diff --git a/bubus-ts/tests/fifo.test.ts b/bubus-ts/tests/fifo.test.ts new file mode 100644 index 0000000..80042fd --- /dev/null +++ b/bubus-ts/tests/fifo.test.ts @@ -0,0 +1,44 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const OrderEvent = BaseEvent.extend('OrderEvent', { order: z.number() }) + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +test('events are processed in FIFO order', async () => { + const bus = new EventBus('FifoBus') + + const processed_orders: number[] = [] + const handler_start_times: number[] = [] + + bus.on(OrderEvent, async (event) => { + 
handler_start_times.push(Date.now()) + if (event.order % 2 === 0) { + await delay(30) + } else { + await delay(5) + } + processed_orders.push(event.order) + }) + + for (let i = 0; i < 10; i += 1) { + bus.dispatch(OrderEvent({ order: i })) + } + + await bus.waitUntilIdle() + + assert.deepEqual( + processed_orders, + Array.from({ length: 10 }, (_, i) => i) + ) + for (let i = 1; i < handler_start_times.length; i += 1) { + assert.ok(handler_start_times[i] >= handler_start_times[i - 1]) + } +}) diff --git a/bubus-ts/tests/find.test.ts b/bubus-ts/tests/find.test.ts new file mode 100644 index 0000000..ea160a5 --- /dev/null +++ b/bubus-ts/tests/find.test.ts @@ -0,0 +1,551 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const ParentEvent = BaseEvent.extend('ParentEvent', {}) +const ChildEvent = BaseEvent.extend('ChildEvent', {}) +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', {}) +const UnrelatedEvent = BaseEvent.extend('UnrelatedEvent', {}) +const ScreenshotEvent = BaseEvent.extend('ScreenshotEvent', { target_id: z.string() }) +const NavigateEvent = BaseEvent.extend('NavigateEvent', { url: z.string() }) +const TabCreatedEvent = BaseEvent.extend('TabCreatedEvent', { tab_id: z.string() }) +const SystemEvent = BaseEvent.extend('SystemEvent', {}) +const UserActionEvent = BaseEvent.extend('UserActionEvent', { + action: z.string(), + user_id: z.string(), +}) + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +test('find past returns most recent completed event', async () => { + const bus = new EventBus('FindPastBus') + + const first_event = bus.dispatch(ParentEvent({})) + await first_event.done() + await delay(20) + const second_event = bus.dispatch(ParentEvent({})) + await second_event.done() + + const found_event = await bus.find(ParentEvent, { past: true, future: false }) + assert.ok(found_event) 
+ assert.equal(found_event.event_id, second_event.event_id) +}) + +test('find past returns null when no matching event exists', async () => { + const bus = new EventBus('FindPastNoneBus') + + const start = Date.now() + const found_event = await bus.find(ParentEvent, { past: true, future: false }) + const elapsed_ms = Date.now() - start + + assert.equal(found_event, null) + assert.ok(elapsed_ms < 100) +}) + +test('find past window filters by time', async () => { + const bus = new EventBus('FindWindowBus') + + const old_event = bus.dispatch(ParentEvent({})) + await old_event.done() + await delay(120) + const new_event = bus.dispatch(ParentEvent({})) + await new_event.done() + + const found_event = await bus.find(ParentEvent, { past: 0.1, future: false }) + assert.ok(found_event) + assert.equal(found_event.event_id, new_event.event_id) +}) + +test('find past returns null when all events are too old', async () => { + const bus = new EventBus('FindTooOldBus') + + const old_event = bus.dispatch(ParentEvent({})) + await old_event.done() + await delay(120) + + const found_event = await bus.find(ParentEvent, { past: 0.05, future: false }) + assert.equal(found_event, null) +}) + +test('find future waits for event', async () => { + const bus = new EventBus('FindFutureBus') + + const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }) + + setTimeout(() => { + bus.dispatch(ParentEvent({})) + }, 50) + + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.event_type, 'ParentEvent') +}) + +test('find future works with string event keys', async () => { + const bus = new EventBus('FindFutureStringBus') + + const find_promise = bus.find('ParentEvent', { past: false, future: 0.5 }) + + setTimeout(() => { + bus.dispatch(ParentEvent({})) + }, 30) + + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.event_type, 'ParentEvent') +}) + +test('find future ignores past events', async () => { + const 
bus = new EventBus('FindFutureIgnoresPastBus') + + const prior = bus.dispatch(ParentEvent({})) + await prior.done() + + const found_event = await bus.find(ParentEvent, { past: false, future: 0.05 }) + assert.equal(found_event, null) +}) + +test('find future times out when no event arrives', async () => { + const bus = new EventBus('FindFutureTimeoutBus') + + const found_event = await bus.find(ParentEvent, { past: false, future: 0.05 }) + assert.equal(found_event, null) +}) + +test('find past=false future=false returns null immediately', async () => { + const bus = new EventBus('FindNeitherBus') + + const start = Date.now() + const found_event = await bus.find(ParentEvent, { past: false, future: false }) + const elapsed_ms = Date.now() - start + + assert.equal(found_event, null) + assert.ok(elapsed_ms < 100) +}) + +test('find past+future returns past event immediately', async () => { + const bus = new EventBus('FindPastFutureBus') + + const dispatched = bus.dispatch(ParentEvent({})) + await dispatched.done() + + const start = Date.now() + const found_event = await bus.find(ParentEvent, { past: true, future: 0.5 }) + const elapsed_ms = Date.now() - start + + assert.ok(found_event) + assert.equal(found_event.event_id, dispatched.event_id) + assert.ok(elapsed_ms < 100) +}) + +test('find past+future waits for future when no past match', async () => { + const bus = new EventBus('FindPastFutureWaitBus') + + const find_promise = bus.find(ChildEvent, { past: true, future: 0.3 }) + + setTimeout(() => { + bus.dispatch(ChildEvent({})) + }, 50) + + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.event_type, 'ChildEvent') +}) + +test('find past/future windows are independent', async () => { + const bus = new EventBus('FindWindowIndependentBus') + + const old_event = bus.dispatch(ParentEvent({})) + await old_event.done() + await delay(120) + + const start = Date.now() + const found_event = await bus.find(ParentEvent, { past: 0.05, 
future: 0.05 }) + const elapsed_ms = Date.now() - start + + assert.equal(found_event, null) + assert.ok(elapsed_ms > 30) +}) + +test('find past true future float returns old event immediately', async () => { + const bus = new EventBus('FindPastTrueFutureFloatBus') + + const dispatched = bus.dispatch(ParentEvent({})) + await dispatched.done() + await delay(120) + + const found_event = await bus.find(ParentEvent, { past: true, future: 0.1 }) + assert.ok(found_event) + assert.equal(found_event.event_id, dispatched.event_id) +}) + +test('find past float future waits for new event', async () => { + const bus = new EventBus('FindPastFloatFutureWaitBus') + + const old_event = bus.dispatch(ParentEvent({})) + await old_event.done() + await delay(120) + + const find_promise = bus.find(ParentEvent, { past: 0.05, future: 0.2 }) + + setTimeout(() => { + bus.dispatch(ParentEvent({})) + }, 50) + + const found_event = await find_promise + assert.ok(found_event) + assert.notEqual(found_event.event_id, old_event.event_id) +}) + +test('find past true future true returns past event immediately', async () => { + const bus = new EventBus('FindPastTrueFutureTrueBus') + + const dispatched = bus.dispatch(ParentEvent({})) + await dispatched.done() + + const start = Date.now() + const found_event = await bus.find(ParentEvent, { past: true, future: true }) + const elapsed_ms = Date.now() - start + + assert.ok(found_event) + assert.equal(found_event.event_id, dispatched.event_id) + assert.ok(elapsed_ms < 100) +}) + +test('find respects where filter', async () => { + const bus = new EventBus('FindWhereBus') + + const event_a = bus.dispatch(ScreenshotEvent({ target_id: 'tab-a' })) + const event_b = bus.dispatch(ScreenshotEvent({ target_id: 'tab-b' })) + await event_a.done() + await event_b.done() + + const found_event = await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab-b', { past: true, future: false }) + + assert.ok(found_event) + assert.equal(found_event.event_id, 
event_b.event_id) +}) + +test('find where filter works with future waiting', async () => { + const bus = new EventBus('FindWhereFutureBus') + + const find_promise = bus.find(UserActionEvent, (event) => event.user_id === 'user123', { past: false, future: 0.3 }) + + setTimeout(() => { + bus.dispatch(UserActionEvent({ action: 'logout', user_id: 'user456' })) + bus.dispatch(UserActionEvent({ action: 'login', user_id: 'user123' })) + }, 50) + + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.user_id, 'user123') +}) + +test('find with multiple concurrent waiters resolves correct events', async () => { + const bus = new EventBus('FindConcurrentBus') + + const find_normal = bus.find(UserActionEvent, (event) => event.action === 'normal', { past: false, future: 0.5 }) + const find_special = bus.find(UserActionEvent, (event) => event.action === 'special', { past: false, future: 0.5 }) + const find_system = bus.find('SystemEvent', { past: false, future: 0.5 }) + + setTimeout(() => { + bus.dispatch(UserActionEvent({ action: 'normal', user_id: 'u1' })) + bus.dispatch(SystemEvent({})) + bus.dispatch(UserActionEvent({ action: 'special', user_id: 'u2' })) + }, 50) + + const [normal, system, special] = await Promise.all([find_normal, find_system, find_special]) + + assert.ok(normal) + assert.equal(normal.action, 'normal') + assert.ok(system) + assert.equal(system.event_type, 'SystemEvent') + assert.ok(special) + assert.equal(special.action, 'special') +}) + +test('find child_of returns child event', async () => { + const bus = new EventBus('FindChildBus') + + bus.on(ParentEvent, (event) => { + event.bus?.emit(ChildEvent({})) + }) + + const parent_event = bus.dispatch(ParentEvent({})) + await bus.waitUntilIdle() + + const child_event = await bus.find(ChildEvent, { + past: true, + future: false, + child_of: parent_event, + }) + + assert.ok(child_event) + assert.equal(child_event.event_parent_id, parent_event.event_id) +}) + +test('find 
child_of returns null for non-child', async () => { + const bus = new EventBus('FindNonChildBus') + + const parent_event = bus.dispatch(ParentEvent({})) + const unrelated_event = bus.dispatch(UnrelatedEvent({})) + await parent_event.done() + await unrelated_event.done() + + const found_event = await bus.find(UnrelatedEvent, { + past: true, + future: false, + child_of: parent_event, + }) + + assert.equal(found_event, null) +}) + +test('find child_of returns grandchild event', async () => { + const bus = new EventBus('FindGrandchildBus') + + let child_event_id: string | null = null + bus.on(ParentEvent, async (event) => { + const child = await event.bus?.emit(ChildEvent({})).done() + child_event_id = child?.event_id ?? null + }) + bus.on(ChildEvent, async (event) => { + await event.bus?.emit(GrandchildEvent({})).done() + }) + + const parent_event = bus.dispatch(ParentEvent({})) + await parent_event.done() + await bus.waitUntilIdle() + + const grandchild_event = await bus.find(GrandchildEvent, { + past: true, + future: false, + child_of: parent_event, + }) + + assert.ok(grandchild_event) + assert.equal(grandchild_event.event_parent_id, child_event_id) +}) + +test('find child_of works across forwarded buses', async () => { + const main_bus = new EventBus('MainBus') + const auth_bus = new EventBus('AuthBus') + + let child_event_id: string | null = null + + main_bus.on(ParentEvent, auth_bus.dispatch) + auth_bus.on(ParentEvent, async (event) => { + const child = await event.bus?.emit(ChildEvent({})).done() + child_event_id = child.event_id + }) + + const parent_event = main_bus.dispatch(ParentEvent({})) + await parent_event.done() + await main_bus.waitUntilIdle() + await auth_bus.waitUntilIdle() + + const found_child = await auth_bus.find(ChildEvent, { + past: 5, + future: 5, + child_of: parent_event, + }) + + assert.ok(found_child) + assert.equal(found_child.event_id, child_event_id) +}) + +test('find child_of filters to correct parent among siblings', async () => { + 
const bus = new EventBus('FindCorrectParentBus') + + bus.on(NavigateEvent, async (event) => { + await event.bus?.emit(TabCreatedEvent({ tab_id: `tab_for_${event.url}` })).done() + }) + bus.on(TabCreatedEvent, () => {}) + + const nav_1 = bus.dispatch(NavigateEvent({ url: 'site1' })) + const nav_2 = bus.dispatch(NavigateEvent({ url: 'site2' })) + await nav_1.done() + await nav_2.done() + + const tab_1 = await bus.find(TabCreatedEvent, { + child_of: nav_1, + past: true, + future: false, + }) + const tab_2 = await bus.find(TabCreatedEvent, { + child_of: nav_2, + past: true, + future: false, + }) + + assert.ok(tab_1) + assert.ok(tab_2) + assert.equal(tab_1.tab_id, 'tab_for_site1') + assert.equal(tab_2.tab_id, 'tab_for_site2') +}) + +test('find future with child_of waits for matching child', async () => { + const bus = new EventBus('FindFutureChildBus') + + bus.on(ParentEvent, async (event) => { + await delay(30) + await event.bus?.emit(ChildEvent({})).done() + }) + + const parent_event = bus.dispatch(ParentEvent({})) + + const find_promise = bus.find(ChildEvent, { + child_of: parent_event, + past: false, + future: 0.3, + }) + + const child_event = await find_promise + assert.ok(child_event) + assert.equal(child_event.event_parent_id, parent_event.event_id) +}) + +test('find with past float and where filter', async () => { + const bus = new EventBus('FindWherePastFloatBus') + + const old_event = bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })) + await old_event.done() + await delay(120) + const new_event = bus.dispatch(ScreenshotEvent({ target_id: 'tab2' })) + await new_event.done() + + const found_tab2 = await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab2', { past: 0.1, future: false }) + + assert.ok(found_tab2) + assert.equal(found_tab2.event_id, new_event.event_id) + + const found_tab1 = await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab1', { past: 0.1, future: false }) + assert.equal(found_tab1, null) +}) + +test('find with 
child_of and past float', async () => { + const bus = new EventBus('FindChildPastFloatBus') + + let child_event_id: string | null = null + bus.on(ParentEvent, async (event) => { + const child = await event.bus?.emit(ChildEvent({})).done() + child_event_id = child?.event_id ?? null + }) + + const parent_event = bus.dispatch(ParentEvent({})) + await parent_event.done() + await bus.waitUntilIdle() + + const found_child = await bus.find(ChildEvent, { + child_of: parent_event, + past: 5, + future: false, + }) + + assert.ok(found_child) + assert.equal(found_child.event_id, child_event_id) +}) + +test('find with all parameters combined', async () => { + const bus = new EventBus('FindAllParamsBus') + + let child_event_id: string | null = null + bus.on(ParentEvent, async (event) => { + const child = await event.bus?.emit(ScreenshotEvent({ target_id: 'child_tab' })).done() + child_event_id = child?.event_id ?? null + }) + + const parent_event = bus.dispatch(ParentEvent({})) + await parent_event.done() + await bus.waitUntilIdle() + + const found_child = await bus.find(ScreenshotEvent, (event) => event.target_id === 'child_tab', { + child_of: parent_event, + past: 5, + future: false, + }) + + assert.ok(found_child) + assert.equal(found_child.event_id, child_event_id) +}) + +test('find past ignores in-progress events but returns after completion', async () => { + const bus = new EventBus('FindCompletedOnlyBus') + + bus.on(ParentEvent, async () => { + await delay(80) + }) + + const dispatched = bus.dispatch(ParentEvent({})) + await delay(10) + + const early_find = await bus.find(ParentEvent, { past: true, future: false }) + assert.equal(early_find, null) + + await dispatched.done() + + const later_find = await bus.find(ParentEvent, { past: true, future: false }) + assert.ok(later_find) + assert.equal(later_find.event_id, dispatched.event_id) +}) + +test('find future resolves before handlers complete', async () => { + const bus = new EventBus('FindBeforeCompleteBus') + + 
bus.on(ParentEvent, async () => { + await delay(80) + }) + + const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }) + + setTimeout(() => { + bus.dispatch(ParentEvent({})) + }, 20) + + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.event_status, 'started') + + await found_event.done() + assert.equal(found_event.event_status, 'completed') +}) + +test('find catches child event that fired during parent handler', async () => { + const bus = new EventBus('FindRaceConditionBus') + + let tab_event_id: string | null = null + bus.on(NavigateEvent, async (event) => { + const tab_event = await event.bus?.emit(TabCreatedEvent({ tab_id: 'new_tab' })).done() + tab_event_id = tab_event?.event_id ?? null + }) + bus.on(TabCreatedEvent, () => {}) + + const nav_event = bus.dispatch(NavigateEvent({ url: 'https://example.com' })) + await nav_event.done() + + const found_tab = await bus.find(TabCreatedEvent, { + child_of: nav_event, + past: true, + future: false, + }) + + assert.ok(found_tab) + assert.equal(found_tab.event_id, tab_event_id) +}) + +test('find returns promise that can be awaited later', async () => { + const bus = new EventBus('FindPromiseBus') + + const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }) + assert.ok(find_promise instanceof Promise) + + bus.dispatch(ParentEvent({})) + const found_event = await find_promise + assert.ok(found_event) +}) diff --git a/bubus-ts/tests/forwarding.test.ts b/bubus-ts/tests/forwarding.test.ts new file mode 100644 index 0000000..cb69616 --- /dev/null +++ b/bubus-ts/tests/forwarding.test.ts @@ -0,0 +1,186 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const PingEvent = BaseEvent.extend('PingEvent', { value: z.number() }) + +test('events forward between buses without duplication', async () => { + const bus_a = new EventBus('BusA') + const bus_b 
= new EventBus('BusB') + const bus_c = new EventBus('BusC') + + const seen_a: string[] = [] + const seen_b: string[] = [] + const seen_c: string[] = [] + + bus_a.on(PingEvent, (event) => { + seen_a.push(event.event_id) + }) + + bus_b.on(PingEvent, (event) => { + seen_b.push(event.event_id) + }) + + bus_c.on(PingEvent, (event) => { + seen_c.push(event.event_id) + }) + + bus_a.on('*', bus_b.dispatch) + bus_b.on('*', bus_c.dispatch) + + const event = bus_a.dispatch(PingEvent({ value: 1 })) + + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + await bus_c.waitUntilIdle() + + assert.equal(seen_a.length, 1) + assert.equal(seen_b.length, 1) + assert.equal(seen_c.length, 1) + + assert.equal(seen_a[0], event.event_id) + assert.equal(seen_b[0], event.event_id) + assert.equal(seen_c[0], event.event_id) + + assert.deepEqual(event.event_path, ['BusA', 'BusB', 'BusC']) +}) + +test('await event.done waits for handlers on forwarded buses', async () => { + const bus_a = new EventBus('BusA') + const bus_b = new EventBus('BusB') + const bus_c = new EventBus('BusC') + + const completion_log: string[] = [] + + const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + + bus_a.on(PingEvent, async () => { + await delay(10) + completion_log.push('A') + }) + + bus_b.on(PingEvent, async () => { + await delay(30) + completion_log.push('B') + }) + + bus_c.on(PingEvent, async () => { + await delay(50) + completion_log.push('C') + }) + + bus_a.on('*', bus_b.dispatch) + bus_b.on('*', bus_c.dispatch) + + const event = bus_a.dispatch(PingEvent({ value: 2 })) + + await event.done() + + assert.deepEqual(completion_log.sort(), ['A', 'B', 'C']) + assert.equal(event.event_pending_bus_count, 0) +}) + +test('circular forwarding A->B->C->A does not loop', async () => { + const peer1 = new EventBus('Peer1') + const peer2 = new EventBus('Peer2') + const peer3 = new EventBus('Peer3') + + const events_at_peer1: string[] = [] + const events_at_peer2: string[] 
= [] + const events_at_peer3: string[] = [] + + peer1.on(PingEvent, (event) => { + events_at_peer1.push(event.event_id) + }) + peer2.on(PingEvent, (event) => { + events_at_peer2.push(event.event_id) + }) + peer3.on(PingEvent, (event) => { + events_at_peer3.push(event.event_id) + }) + + // Create a full cycle: Peer1 -> Peer2 -> Peer3 -> Peer1 + peer1.on('*', peer2.dispatch) + peer2.on('*', peer3.dispatch) + peer3.on('*', peer1.dispatch) // completes the circle + + const event = peer1.dispatch(PingEvent({ value: 42 })) + + await peer1.waitUntilIdle() + await peer2.waitUntilIdle() + await peer3.waitUntilIdle() + + // Each peer must see the event exactly once (no infinite loop) + assert.equal(events_at_peer1.length, 1) + assert.equal(events_at_peer2.length, 1) + assert.equal(events_at_peer3.length, 1) + + // All saw the same event + assert.equal(events_at_peer1[0], event.event_id) + assert.equal(events_at_peer2[0], event.event_id) + assert.equal(events_at_peer3[0], event.event_id) + + // event_path shows propagation order without looping back + assert.deepEqual(event.event_path, ['Peer1', 'Peer2', 'Peer3']) + + // --- Start from a different peer in the same cycle --- + events_at_peer1.length = 0 + events_at_peer2.length = 0 + events_at_peer3.length = 0 + + const event2 = peer2.dispatch(PingEvent({ value: 99 })) + + await peer1.waitUntilIdle() + await peer2.waitUntilIdle() + await peer3.waitUntilIdle() + + // Each peer sees it exactly once + assert.equal(events_at_peer1.length, 1) + assert.equal(events_at_peer2.length, 1) + assert.equal(events_at_peer3.length, 1) + + // Path starts at Peer2, goes to Peer3, then Peer1 (stops before looping back to Peer2) + assert.deepEqual(event2.event_path, ['Peer2', 'Peer3', 'Peer1']) +}) + +test('await event.done waits when forwarding handler is async-delayed', async () => { + const bus_a = new EventBus('BusA') + const bus_b = new EventBus('BusB') + + const delay = (ms: number): Promise => + new Promise((resolve) => { + 
setTimeout(resolve, ms) + }) + + let bus_a_done = false + let bus_b_done = false + + bus_a.on(PingEvent, async () => { + await delay(20) + bus_a_done = true + }) + + bus_b.on(PingEvent, async () => { + await delay(10) + bus_b_done = true + }) + + bus_a.on('*', async (event) => { + await delay(30) + bus_b.dispatch(event) + }) + + const event = bus_a.dispatch(PingEvent({ value: 3 })) + await event.done() + + assert.equal(bus_a_done, true) + assert.equal(bus_b_done, true) + assert.equal(event.event_pending_bus_count, 0) + assert.deepEqual(event.event_path, ['BusA', 'BusB']) +}) diff --git a/bubus-ts/tests/handlers.test.ts b/bubus-ts/tests/handlers.test.ts new file mode 100644 index 0000000..837eb6b --- /dev/null +++ b/bubus-ts/tests/handlers.test.ts @@ -0,0 +1,150 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const UserActionEvent = BaseEvent.extend('UserActionEvent', { + action: z.string(), + user_id: z.string(), +}) + +const SystemEventModel = BaseEvent.extend('SystemEventModel', { + event_name: z.string(), +}) + +test('handler registration via string, class, and wildcard', async () => { + const bus = new EventBus('HandlerRegistrationBus') + const results: Record = { + specific: [], + model: [], + universal: [], + } + + const user_handler = async (event: InstanceType): Promise => { + results.specific.push(event.action) + return 'user_handled' + } + + const system_handler = async (event: InstanceType): Promise => { + results.model.push(event.event_name) + return 'system_handled' + } + + const universal_handler = async (event: BaseEvent): Promise => { + results.universal.push(event.event_type) + return 'universal' + } + + const system_event_class = (SystemEventModel as unknown as { class: typeof BaseEvent }).class + + bus.on('UserActionEvent', user_handler) + bus.on(system_event_class, system_handler) + bus.on('*', universal_handler) + + 
bus.dispatch(UserActionEvent({ action: 'login', user_id: 'u1' })) + bus.dispatch(SystemEventModel({ event_name: 'startup' })) + await bus.waitUntilIdle() + + assert.deepEqual(results.specific, ['login']) + assert.deepEqual(results.model, ['startup']) + assert.deepEqual(new Set(results.universal), new Set(['UserActionEvent', 'SystemEventModel'])) +}) + +test('handlers can be sync or async', async () => { + const bus = new EventBus('SyncAsyncHandlersBus') + + const sync_handler = (_event: BaseEvent): string => 'sync' + const async_handler = async (_event: BaseEvent): Promise => 'async' + + bus.on('TestEvent', sync_handler) + bus.on('TestEvent', async_handler) + + const handler_count = Array.from(bus.handlers.values()).filter((entry) => entry.event_key === 'TestEvent').length + assert.equal(handler_count, 2) + + const event = bus.dispatch(BaseEvent.extend('TestEvent', {})({})) + await event.done() + + const results = Array.from(event.event_results.values()).map((result) => result.result) + assert.ok(results.includes('sync')) + assert.ok(results.includes('async')) +}) + +test('instance, class, and static method handlers', async () => { + const bus = new EventBus('MethodHandlersBus') + const results: string[] = [] + + class EventProcessor { + name: string + value: number + + constructor(name: string, value: number) { + this.name = name + this.value = value + } + + sync_method_handler = (event: InstanceType): Record => { + results.push(`${this.name}_sync`) + return { processor: this.name, value: this.value, action: event.action } + } + + async async_method_handler(event: InstanceType): Promise> { + await new Promise((resolve) => setTimeout(resolve, 10)) + results.push(`${this.name}_async`) + return { processor: this.name, value: this.value * 2, action: event.action } + } + + static class_method_handler(event: InstanceType): string { + results.push('classmethod') + return `Handled by ${event.event_type}` + } + + static static_method_handler(_event: InstanceType): string { 
+ results.push('staticmethod') + return 'Handled by static method' + } + } + + const processor1 = new EventProcessor('Processor1', 10) + const processor2 = new EventProcessor('Processor2', 20) + + bus.on(UserActionEvent, processor1.sync_method_handler) + bus.on(UserActionEvent, processor1.async_method_handler.bind(processor1)) + bus.on(UserActionEvent, processor2.sync_method_handler) + bus.on('UserActionEvent', EventProcessor.class_method_handler) + bus.on('UserActionEvent', EventProcessor.static_method_handler) + + const event = UserActionEvent({ action: 'test_methods', user_id: 'u123' }) + const completed_event = bus.dispatch(event) + await completed_event.done() + + assert.equal(results.length, 5) + assert.ok(results.includes('Processor1_sync')) + assert.ok(results.includes('Processor1_async')) + assert.ok(results.includes('Processor2_sync')) + assert.ok(results.includes('classmethod')) + assert.ok(results.includes('staticmethod')) + + const result_values = Array.from(completed_event.event_results.values()).map((result) => result.result) + + const p1_sync = result_values.find( + (result) => + typeof result === 'object' && + result !== null && + (result as { processor?: string; value?: number }).processor === 'Processor1' && + (result as { value?: number }).value === 10 + ) as { action?: string } | undefined + + const p1_async = result_values.find( + (result) => + typeof result === 'object' && + result !== null && + (result as { processor?: string; value?: number }).processor === 'Processor1' && + (result as { value?: number }).value === 20 + ) as { action?: string } | undefined + + assert.equal(p1_sync?.action, 'test_methods') + assert.equal(p1_async?.action, 'test_methods') +}) diff --git a/bubus-ts/tests/locking.test.ts b/bubus-ts/tests/locking.test.ts new file mode 100644 index 0000000..06e1022 --- /dev/null +++ b/bubus-ts/tests/locking.test.ts @@ -0,0 +1,1117 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 
'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +/* +Potential failure modes + +A) Event concurrency modes +- global-serial not enforcing strict FIFO across multiple buses (events interleave). +- bus-serial allows cross-bus interleaving but still must be FIFO within a bus; breaks under forwarding. +- parallel accidentally serializes (e.g., semaphore still used) or breaks queue-jump semantics. +- auto not resolving correctly to bus defaults. + +B) Handler concurrency modes +- global-serial not enforcing strict handler order across buses. +- bus-serial leaks parallelism between handlers on the same bus. +- parallel accidentally serializes or fails to enforce per-handler ordering. +- auto not resolving correctly to handler options or bus defaults. + +C) Precedence resolution +- Event overrides not taking precedence over handler options. +- Handler options not taking precedence over bus defaults. +- Conflicting settings (event says parallel, handler says serial) choose wrong winner. + +D) Queue-jump / awaited events +- event.done() inside handler doesn’t jump the queue across buses. +- Queue-jump bypasses semaphores incorrectly in contexts where it shouldn’t. +- Queue-jump fails when event already in-flight. + +E) FIFO correctness +- FIFO order broken under bus-serial with interleaved emissions. +- FIFO order broken under global-serial across buses. +- FIFO order broken with forwarded events. + +F) Forwarding & bus context +- Forwarded event’s event.bus mutates current handler context (wrong bus). +- Child events emitted after forwarding are mis-parented. +- event.event_path diverges between buses. +- Handler attribution lost when forwarded across buses (tree/log issues). + +G) Parent/child tracking +- Child events not correctly linked to the parent handler when emitted via event.bus. +- event_children missing under concurrency due to async timing. +- event_pending_bus_count not decremented properly, leaving events stuck. 
+ +H) Find semantics under concurrency +- find(past) returns event not yet completed. +- find(future) doesn’t resolve when event finishes in another bus. +- find with child_of returns mismatched events under concurrency. + +I) Timeouts + cancellation propagation +- Timeout doesn’t cancel pending child handlers. +- Cancelled results not marked or mis-attributed to the wrong handler. +- Timeout doesn’t propagate across forwarded buses (event still waits forever). + +J) Handler result validation +- event_result_schema not enforced under parallel handler completion. +- Invalid result doesn’t mark handler error or event failure. +- Timeout + schema error ordering wrong (e.g., schema error overwrites timeout). + +K) Idle / completion +- waitUntilIdle() returns early with in-flight events. +- event.done() resolves before children complete. +- event.done() never resolves due to deadlock in runloop. + +L) Reentrancy / nested awaits +- Nested awaited child events starve sibling handlers. +- Awaited child events skip semaphore incorrectly (deadlocks or ordering regressions). + +M) Edge-cases +- Multiple handlers for same event type with different options collide. +- Handler throws synchronously before await (still counted, no leaks). +- Handler returns a rejected promise (properly surfaced). +- Event emitted with event_concurrency/event_handler_concurrency invalid value (schema rejects). +- Event emitted with no bus set (done should reject). 
+*/
+
+const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms))
+const withResolvers = <T = void>() => {
+  let resolve!: (value: T | PromiseLike<T>) => void
+  let reject!: (reason?: unknown) => void
+  const promise = new Promise<T>((resolve_fn, reject_fn) => {
+    resolve = resolve_fn
+    reject = reject_fn
+  })
+  return { promise, resolve, reject }
+}
+
+test('global-serial: only one event processes at a time across buses', async () => {
+  const SerialEvent = BaseEvent.extend('SerialEvent', {
+    order: z.number(),
+    source: z.string(),
+  })
+
+  const bus_a = new EventBus('GlobalSerialA', { event_concurrency: 'global-serial' })
+  const bus_b = new EventBus('GlobalSerialB', { event_concurrency: 'global-serial' })
+
+  let in_flight = 0
+  let max_in_flight = 0
+  const starts: string[] = []
+
+  const handler = async (event: InstanceType<typeof SerialEvent>) => {
+    in_flight += 1
+    max_in_flight = Math.max(max_in_flight, in_flight)
+    starts.push(`${event.source}:${event.order}`)
+    await sleep(10)
+    in_flight -= 1
+  }
+
+  bus_a.on(SerialEvent, handler)
+  bus_b.on(SerialEvent, handler)
+
+  for (let i = 0; i < 3; i += 1) {
+    bus_a.dispatch(SerialEvent({ order: i, source: 'a' }))
+    bus_b.dispatch(SerialEvent({ order: i, source: 'b' }))
+  }
+
+  await bus_a.waitUntilIdle()
+  await bus_b.waitUntilIdle()
+
+  assert.equal(max_in_flight, 1)
+
+  const starts_a = starts.filter((value) => value.startsWith('a:')).map((value) => Number(value.split(':')[1]))
+  const starts_b = starts.filter((value) => value.startsWith('b:')).map((value) => Number(value.split(':')[1]))
+
+  assert.deepEqual(starts_a, [0, 1, 2])
+  assert.deepEqual(starts_b, [0, 1, 2])
+})
+
+test('global-serial: awaited child jumps ahead of queued events across buses', async () => {
+  const ParentEvent = BaseEvent.extend('ParentEvent', {})
+  const ChildEvent = BaseEvent.extend('ChildEvent', {})
+  const QueuedEvent = BaseEvent.extend('QueuedEvent', {})
+
+  const bus_a = new EventBus('GlobalSerialParent', { event_concurrency: 'global-serial'
}) + const bus_b = new EventBus('GlobalSerialChild', { event_concurrency: 'global-serial' }) + + const order: string[] = [] + + bus_b.on(ChildEvent, async () => { + order.push('child_start') + await sleep(5) + order.push('child_end') + }) + + bus_b.on(QueuedEvent, async () => { + order.push('queued_start') + await sleep(1) + order.push('queued_end') + }) + + bus_a.on(ParentEvent, async (event) => { + order.push('parent_start') + bus_b.emit(QueuedEvent({})) + // Emit through the scoped proxy so parent tracking is set up, + // then also dispatch to bus_b for cross-bus processing. + const child = event.bus?.emit(ChildEvent({}))! + bus_b.dispatch(child) + order.push('child_dispatched') + await child.done() + order.push('child_awaited') + order.push('parent_end') + }) + + const parent = bus_a.dispatch(ParentEvent({})) + await parent.done() + await bus_b.waitUntilIdle() + + const child_start_idx = order.indexOf('child_start') + const child_end_idx = order.indexOf('child_end') + const queued_start_idx = order.indexOf('queued_start') + + assert.ok(child_start_idx !== -1) + assert.ok(child_end_idx !== -1) + assert.ok(queued_start_idx !== -1) + assert.ok(child_start_idx < queued_start_idx) + assert.ok(child_end_idx < queued_start_idx) +}) + +test('global-serial: handler semaphore serializes handlers across buses', async () => { + const HandlerEvent = BaseEvent.extend('HandlerEvent', { + order: z.number(), + source: z.string(), + }) + + const bus_a = new EventBus('GlobalHandlerA', { + event_concurrency: 'parallel', + event_handler_concurrency: 'global-serial', + }) + const bus_b = new EventBus('GlobalHandlerB', { + event_concurrency: 'parallel', + event_handler_concurrency: 'global-serial', + }) + + let in_flight = 0 + let max_in_flight = 0 + + const handler = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await sleep(5) + in_flight -= 1 + } + + bus_a.on(HandlerEvent, handler) + bus_b.on(HandlerEvent, handler) + + for (let i = 0; i < 4; 
i += 1) { + bus_a.dispatch(HandlerEvent({ order: i, source: 'a' })) + bus_b.dispatch(HandlerEvent({ order: i, source: 'b' })) + } + + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + assert.equal(max_in_flight, 1) +}) + +test('bus-serial: events serialize per bus but overlap across buses', async () => { + const SerialEvent = BaseEvent.extend('SerialPerBusEvent', { + order: z.number(), + source: z.string(), + }) + + const bus_a = new EventBus('BusSerialA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('BusSerialB', { event_concurrency: 'bus-serial' }) + + let in_flight_global = 0 + let max_in_flight_global = 0 + let in_flight_a = 0 + let in_flight_b = 0 + let max_in_flight_a = 0 + let max_in_flight_b = 0 + + let resolve_b_started: (() => void) | null = null + const b_started = new Promise((resolve) => { + resolve_b_started = resolve + }) + + bus_a.on(SerialEvent, async () => { + in_flight_global += 1 + in_flight_a += 1 + max_in_flight_global = Math.max(max_in_flight_global, in_flight_global) + max_in_flight_a = Math.max(max_in_flight_a, in_flight_a) + await b_started + await sleep(10) + in_flight_global -= 1 + in_flight_a -= 1 + }) + + bus_b.on(SerialEvent, async () => { + in_flight_global += 1 + in_flight_b += 1 + max_in_flight_global = Math.max(max_in_flight_global, in_flight_global) + max_in_flight_b = Math.max(max_in_flight_b, in_flight_b) + if (resolve_b_started) { + resolve_b_started() + resolve_b_started = null + } + await sleep(10) + in_flight_global -= 1 + in_flight_b -= 1 + }) + + bus_a.dispatch(SerialEvent({ order: 0, source: 'a' })) + bus_b.dispatch(SerialEvent({ order: 0, source: 'b' })) + + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) + + assert.equal(max_in_flight_a, 1) + assert.equal(max_in_flight_b, 1) + assert.ok(max_in_flight_global >= 2) +}) + +test('bus-serial: FIFO order preserved per bus with interleaving', async () => { + const SerialEvent = BaseEvent.extend('SerialInterleavedEvent', { + 
order: z.number(), + source: z.string(), + }) + + const bus_a = new EventBus('BusSerialOrderA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('BusSerialOrderB', { event_concurrency: 'bus-serial' }) + + const starts_a: number[] = [] + const starts_b: number[] = [] + + bus_a.on(SerialEvent, async (event) => { + starts_a.push(event.order) + await sleep(2) + }) + + bus_b.on(SerialEvent, async (event) => { + starts_b.push(event.order) + await sleep(2) + }) + + for (let i = 0; i < 4; i += 1) { + bus_a.dispatch(SerialEvent({ order: i, source: 'a' })) + bus_b.dispatch(SerialEvent({ order: i, source: 'b' })) + } + + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) + + assert.deepEqual(starts_a, [0, 1, 2, 3]) + assert.deepEqual(starts_b, [0, 1, 2, 3]) +}) + +test('bus-serial: awaiting child on one bus does not block other bus queue', async () => { + const ParentEvent = BaseEvent.extend('BusSerialParent', {}) + const ChildEvent = BaseEvent.extend('BusSerialChild', {}) + const OtherEvent = BaseEvent.extend('BusSerialOther', {}) + + const bus_a = new EventBus('BusSerialParentBus', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('BusSerialOtherBus', { event_concurrency: 'bus-serial' }) + + const order: string[] = [] + + bus_a.on(ChildEvent, async () => { + order.push('child_start') + await sleep(10) + order.push('child_end') + }) + + bus_a.on(ParentEvent, async (event) => { + order.push('parent_start') + const child = event.bus?.emit(ChildEvent({}))! 
+ await child.done() + order.push('parent_end') + }) + + bus_b.on(OtherEvent, async () => { + order.push('other_start') + await sleep(2) + order.push('other_end') + }) + + const parent = bus_a.dispatch(ParentEvent({})) + await sleep(0) + bus_b.dispatch(OtherEvent({})) + + await parent.done() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) + + const other_start_idx = order.indexOf('other_start') + const parent_end_idx = order.indexOf('parent_end') + assert.ok(other_start_idx !== -1) + assert.ok(parent_end_idx !== -1) + assert.ok(other_start_idx < parent_end_idx) +}) + +test('parallel: events overlap on same bus when event_concurrency is parallel', async () => { + const ParallelEvent = BaseEvent.extend('ParallelEvent', { order: z.number() }) + const bus = new EventBus('ParallelEventBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + setTimeout(() => resolve(), 20) + + bus.on(ParallelEvent, async (_event) => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + await sleep(10) + in_flight -= 1 + }) + + bus.dispatch(ParallelEvent({ order: 0 })) + bus.dispatch(ParallelEvent({ order: 1 })) + + await bus.waitUntilIdle() + assert.ok(max_in_flight >= 2) +}) + +test('parallel: handlers overlap for same event when event_handler_concurrency is parallel', async () => { + const ParallelHandlerEvent = BaseEvent.extend('ParallelHandlerEvent', {}) + const bus = new EventBus('ParallelHandlerBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler_a = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + const handler_b = async () => { + in_flight += 1 + max_in_flight = 
Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus.on(ParallelHandlerEvent, handler_a) + bus.on(ParallelHandlerEvent, handler_b) + + const event = bus.dispatch(ParallelHandlerEvent({})) + await sleep(0) + resolve() + await event.done() + await bus.waitUntilIdle() + + assert.ok(max_in_flight >= 2) +}) + +test('parallel: global-serial handler semaphore still serializes across buses', async () => { + const ParallelEvent = BaseEvent.extend('ParallelEventGlobalHandler', { + source: z.string(), + }) + + const bus_a = new EventBus('ParallelHandlerGlobalA', { + event_concurrency: 'parallel', + event_handler_concurrency: 'global-serial', + }) + const bus_b = new EventBus('ParallelHandlerGlobalB', { + event_concurrency: 'parallel', + event_handler_concurrency: 'global-serial', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus_a.on(ParallelEvent, handler) + bus_b.on(ParallelEvent, handler) + + bus_a.dispatch(ParallelEvent({ source: 'a' })) + bus_b.dispatch(ParallelEvent({ source: 'b' })) + + await sleep(0) + resolve() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) + + assert.equal(max_in_flight, 1) +}) + +test('precedence: event event_handler_concurrency overrides handler options', async () => { + const OverrideEvent = BaseEvent.extend('OverrideEvent', { + event_handler_concurrency: z.literal('bus-serial'), + }) + const bus = new EventBus('OverrideBus', { event_handler_concurrency: 'parallel' }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus.on(OverrideEvent, handler, { event_handler_concurrency: 'parallel' }) + bus.on(OverrideEvent, handler, 
{ event_handler_concurrency: 'parallel' }) + + const event = bus.dispatch(OverrideEvent({ event_handler_concurrency: 'bus-serial' })) + await sleep(0) + resolve() + await event.done() + await bus.waitUntilIdle() + + assert.equal(max_in_flight, 1) +}) + +test('precedence: handler options override bus defaults when event has no override', async () => { + const OptionEvent = BaseEvent.extend('OptionEvent', {}) + const bus = new EventBus('OptionBus', { event_handler_concurrency: 'bus-serial' }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler_a = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + const handler_b = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus.on(OptionEvent, handler_a, { event_handler_concurrency: 'parallel' }) + bus.on(OptionEvent, handler_b, { event_handler_concurrency: 'parallel' }) + + const event = bus.dispatch(OptionEvent({})) + await sleep(0) + resolve() + await event.done() + await bus.waitUntilIdle() + + assert.ok(max_in_flight >= 2) +}) + +test('precedence: event event_handler_concurrency overrides handler options to parallel', async () => { + const OverrideEvent = BaseEvent.extend('OverrideEventParallelHandlers', { + event_handler_concurrency: z.literal('parallel'), + }) + const bus = new EventBus('OverrideParallelHandlersBus', { event_handler_concurrency: 'bus-serial' }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus.on(OverrideEvent, handler, { event_handler_concurrency: 'bus-serial' }) + bus.on(OverrideEvent, handler, { event_handler_concurrency: 'bus-serial' }) + + const event = bus.dispatch(OverrideEvent({ 
event_handler_concurrency: 'parallel' })) + await sleep(0) + resolve() + await event.done() + await bus.waitUntilIdle() + + assert.ok(max_in_flight >= 2) +}) + +test('precedence: event event_concurrency overrides bus defaults to parallel', async () => { + const OverrideEvent = BaseEvent.extend('OverrideEventParallelEvents', { + event_concurrency: z.literal('parallel'), + order: z.number(), + }) + const bus = new EventBus('OverrideParallelEventsBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + bus.on(OverrideEvent, async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + }) + + bus.dispatch(OverrideEvent({ order: 0, event_concurrency: 'parallel' })) + bus.dispatch(OverrideEvent({ order: 1, event_concurrency: 'parallel' })) + + await sleep(0) + resolve() + await bus.waitUntilIdle() + + assert.ok(max_in_flight >= 2) +}) + +test('precedence: event event_concurrency overrides bus defaults to bus-serial', async () => { + const OverrideEvent = BaseEvent.extend('OverrideEventBusSerial', { + event_concurrency: z.literal('bus-serial'), + order: z.number(), + }) + const bus = new EventBus('OverrideBusSerialEventsBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + bus.on(OverrideEvent, async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + }) + + bus.dispatch(OverrideEvent({ order: 0, event_concurrency: 'bus-serial' })) + bus.dispatch(OverrideEvent({ order: 1, event_concurrency: 'bus-serial' })) + + await sleep(0) + assert.equal(max_in_flight, 1) + resolve() + await bus.waitUntilIdle() +}) + +test('global-serial + handler parallel: handlers overlap but events do not across buses', async () 
=> { + const SerialParallelEvent = BaseEvent.extend('GlobalSerialParallelHandlers', {}) + + const bus_a = new EventBus('GlobalSerialParallelA', { + event_concurrency: 'global-serial', + event_handler_concurrency: 'parallel', + }) + const bus_b = new EventBus('GlobalSerialParallelB', { + event_concurrency: 'global-serial', + event_handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus_a.on(SerialParallelEvent, handler) + bus_a.on(SerialParallelEvent, handler) + bus_b.on(SerialParallelEvent, handler) + bus_b.on(SerialParallelEvent, handler) + + bus_a.dispatch(SerialParallelEvent({})) + bus_b.dispatch(SerialParallelEvent({})) + + await sleep(0) + assert.equal(max_in_flight, 2) + resolve() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) +}) + +test('event parallel + handler bus-serial: handlers serialize within a bus across events', async () => { + const ParallelEvent = BaseEvent.extend('ParallelEventsSerialHandlers', { order: z.number() }) + const bus = new EventBus('ParallelEventsSerialHandlersBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'bus-serial', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + bus.on(ParallelEvent, async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + }) + + bus.dispatch(ParallelEvent({ order: 0 })) + bus.dispatch(ParallelEvent({ order: 1 })) + + await sleep(0) + assert.equal(max_in_flight, 1) + resolve() + await bus.waitUntilIdle() +}) + +test('event parallel + handler bus-serial: handlers overlap across buses', async () => { + const ParallelEvent = BaseEvent.extend('ParallelEventsBusHandlers', { source: z.string() }) + + const bus_a = new 
EventBus('ParallelBusHandlersA', { + event_concurrency: 'parallel', + event_handler_concurrency: 'bus-serial', + }) + const bus_b = new EventBus('ParallelBusHandlersB', { + event_concurrency: 'parallel', + event_handler_concurrency: 'bus-serial', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus_a.on(ParallelEvent, handler) + bus_b.on(ParallelEvent, handler) + + bus_a.dispatch(ParallelEvent({ source: 'a' })) + bus_b.dispatch(ParallelEvent({ source: 'b' })) + + await sleep(0) + assert.ok(max_in_flight >= 2) + resolve() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) +}) + +test('handler options can enforce global-serial even when bus defaults to parallel', async () => { + const HandlerEvent = BaseEvent.extend('HandlerOptionsGlobalSerial', { source: z.string() }) + + const bus_a = new EventBus('HandlerOptionsGlobalA', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + const bus_b = new EventBus('HandlerOptionsGlobalB', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus_a.on(HandlerEvent, handler, { event_handler_concurrency: 'global-serial' }) + bus_b.on(HandlerEvent, handler, { event_handler_concurrency: 'global-serial' }) + + bus_a.dispatch(HandlerEvent({ source: 'a' })) + bus_b.dispatch(HandlerEvent({ source: 'b' })) + + await sleep(0) + assert.equal(max_in_flight, 1) + resolve() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) +}) + +test('auto: event_concurrency auto resolves to bus defaults', async () => { + const AutoEvent = 
BaseEvent.extend('AutoEvent', { + event_concurrency: z.literal('auto'), + }) + const bus = new EventBus('AutoBus', { event_concurrency: 'bus-serial' }) + + let in_flight = 0 + let max_in_flight = 0 + + bus.on(AutoEvent, async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await sleep(5) + in_flight -= 1 + }) + + bus.dispatch(AutoEvent({ event_concurrency: 'auto' })) + bus.dispatch(AutoEvent({ event_concurrency: 'auto' })) + + await bus.waitUntilIdle() + assert.equal(max_in_flight, 1) +}) + +test('auto: event_handler_concurrency auto resolves to bus defaults', async () => { + const AutoHandlerEvent = BaseEvent.extend('AutoHandlerEvent', { + event_handler_concurrency: z.literal('auto'), + }) + const bus = new EventBus('AutoHandlerBus', { event_handler_concurrency: 'bus-serial' }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus.on(AutoHandlerEvent, handler) + bus.on(AutoHandlerEvent, handler) + + const event = bus.dispatch(AutoHandlerEvent({ event_handler_concurrency: 'auto' })) + await sleep(0) + resolve() + await event.done() + await bus.waitUntilIdle() + + assert.equal(max_in_flight, 1) +}) + +test('queue-jump: awaited child preempts queued sibling on same bus', async () => { + const ParentEvent = BaseEvent.extend('QueueJumpParent', {}) + const ChildEvent = BaseEvent.extend('QueueJumpChild', {}) + const SiblingEvent = BaseEvent.extend('QueueJumpSibling', {}) + + const bus = new EventBus('QueueJumpBus', { event_concurrency: 'bus-serial' }) + const order: string[] = [] + + bus.on(ChildEvent, async () => { + order.push('child_start') + await sleep(5) + order.push('child_end') + }) + + bus.on(SiblingEvent, async () => { + order.push('sibling_start') + await sleep(1) + order.push('sibling_end') + }) + + bus.on(ParentEvent, async (event) => { + 
order.push('parent_start') + bus.emit(SiblingEvent({})) + const child = event.bus?.emit(ChildEvent({}))! + order.push('child_dispatched') + await child.done() + order.push('child_awaited') + order.push('parent_end') + }) + + const parent = bus.dispatch(ParentEvent({})) + await parent.done() + await bus.waitUntilIdle() + + const child_start_idx = order.indexOf('child_start') + const child_end_idx = order.indexOf('child_end') + const sibling_start_idx = order.indexOf('sibling_start') + + assert.ok(child_start_idx !== -1) + assert.ok(child_end_idx !== -1) + assert.ok(sibling_start_idx !== -1) + assert.ok(child_start_idx < sibling_start_idx) + assert.ok(child_end_idx < sibling_start_idx) +}) + +test('queue-jump: same event handlers on separate buses stay isolated without forwarding', async () => { + const ParentEvent = BaseEvent.extend('QueueJumpIsolatedParent', {}) + const SharedEvent = BaseEvent.extend('QueueJumpIsolatedShared', {}) + const SiblingEvent = BaseEvent.extend('QueueJumpIsolatedSibling', {}) + + const bus_a = new EventBus('QueueJumpIsolatedA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('QueueJumpIsolatedB', { event_concurrency: 'bus-serial' }) + + const order: string[] = [] + let bus_a_shared_runs = 0 + let bus_b_shared_runs = 0 + + bus_a.on(SharedEvent, async () => { + bus_a_shared_runs += 1 + order.push('bus_a_shared_start') + await sleep(2) + order.push('bus_a_shared_end') + }) + + bus_b.on(SharedEvent, async () => { + bus_b_shared_runs += 1 + order.push('bus_b_shared_start') + await sleep(2) + order.push('bus_b_shared_end') + }) + + bus_a.on(SiblingEvent, async () => { + order.push('bus_a_sibling_start') + await sleep(1) + order.push('bus_a_sibling_end') + }) + + bus_a.on(ParentEvent, async (event) => { + order.push('parent_start') + bus_a.emit(SiblingEvent({})) + const shared = event.bus?.emit(SharedEvent({}))! 
+    order.push('shared_dispatched')
+    await shared.done()
+    order.push('shared_awaited')
+    order.push('parent_end')
+  })
+
+  const parent = bus_a.dispatch(ParentEvent({}))
+  await parent.done()
+  await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()])
+
+  assert.equal(bus_a_shared_runs, 1)
+  assert.equal(bus_b_shared_runs, 0)
+  assert.equal(order.includes('bus_b_shared_start'), false)
+
+  const bus_a_shared_end_idx = order.indexOf('bus_a_shared_end')
+  const bus_a_sibling_start_idx = order.indexOf('bus_a_sibling_start')
+  assert.ok(bus_a_shared_end_idx !== -1)
+  assert.ok(bus_a_sibling_start_idx !== -1)
+  assert.ok(bus_a_shared_end_idx < bus_a_sibling_start_idx)
+})
+
+test('queue-jump: awaiting in-flight event does not double-run handlers', async () => {
+  const InFlightEvent = BaseEvent.extend('InFlightEvent', {})
+  const bus = new EventBus('InFlightBus', {
+    event_concurrency: 'parallel',
+    event_handler_concurrency: 'parallel',
+  })
+
+  let handler_runs = 0
+  let resolve_started: (() => void) | null = null
+  const started = new Promise<void>((resolve) => {
+    resolve_started = resolve
+  })
+  const { promise: release_child, resolve: resolve_child } = withResolvers()
+
+  bus.on(InFlightEvent, async () => {
+    handler_runs += 1
+    if (resolve_started) {
+      resolve_started()
+      resolve_started = null
+    }
+    await release_child
+  })
+
+  const child = bus.dispatch(InFlightEvent({}))
+  await started
+
+  let done_resolved = false
+  const done_promise = child.done().then(() => {
+    done_resolved = true
+  })
+
+  await sleep(0)
+  assert.equal(done_resolved, false)
+
+  resolve_child()
+  await done_promise
+  await bus.waitUntilIdle()
+
+  assert.equal(handler_runs, 1)
+})
+
+test('edge-case: event with no handlers completes immediately', async () => {
+  const NoHandlerEvent = BaseEvent.extend('NoHandlerEvent', {})
+  const bus = new EventBus('NoHandlerBus')
+
+  const event = bus.dispatch(NoHandlerEvent({}))
+  await event.done()
+  await bus.waitUntilIdle()
+
+  
assert.equal(event.event_status, 'completed') + assert.equal(event.event_pending_bus_count, 0) +}) + +test('fifo: forwarded events preserve order on target bus (bus-serial)', async () => { + const OrderedEvent = BaseEvent.extend('ForwardOrderEvent', { order: z.number() }) + + const bus_a = new EventBus('ForwardOrderA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('ForwardOrderB', { event_concurrency: 'bus-serial' }) + + const order_a: number[] = [] + const order_b: number[] = [] + + bus_a.on(OrderedEvent, async (event) => { + order_a.push(event.order) + bus_b.dispatch(event) + await sleep(2) + }) + + bus_b.on(OrderedEvent, async (event) => { + const bus_b_results = Array.from(event.event_results.values()).filter((result) => result.eventbus_name === 'ForwardOrderB') + const in_flight = bus_b_results.filter((result) => result.status === 'pending' || result.status === 'started') + assert.ok(in_flight.length <= 1) + order_b.push(event.order) + await sleep(1) + }) + + for (let i = 0; i < 5; i += 1) { + bus_a.dispatch(OrderedEvent({ order: i })) + } + + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) + + const history_orders = Array.from(bus_b.event_history.values()).map((event) => (event as { order?: number }).order) + const results_sizes = Array.from(bus_b.event_history.values()).map((event) => event.event_results.size) + const bus_b_result_counts = Array.from(bus_b.event_history.values()).map( + (event) => Array.from(event.event_results.values()).filter((result) => result.eventbus_name === 'ForwardOrderB').length + ) + const processed_flags = Array.from(bus_b.event_history.values()).map((event) => + Array.from(event.event_results.values()) + .filter((result) => result.eventbus_name === 'ForwardOrderB') + .every((result) => result.status === 'completed' || result.status === 'error') + ) + const pending_counts = Array.from(bus_b.event_history.values()).map( + (event) => Array.from(event.event_results.values()).filter((result) => 
result.status === 'pending').length + ) + assert.deepEqual(order_a, [0, 1, 2, 3, 4]) + assert.deepEqual(order_b, [0, 1, 2, 3, 4]) + assert.deepEqual(history_orders, [0, 1, 2, 3, 4]) + assert.deepEqual(results_sizes, [2, 2, 2, 2, 2]) + assert.deepEqual(bus_b_result_counts, [1, 1, 1, 1, 1]) + assert.deepEqual(processed_flags, [true, true, true, true, true]) + assert.deepEqual(pending_counts, [0, 0, 0, 0, 0]) +}) + +test('fifo: forwarded events preserve order across chained buses (bus-serial)', async () => { + const OrderedEvent = BaseEvent.extend('ForwardChainEvent', { order: z.number() }) + + const bus_a = new EventBus('ForwardChainA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('ForwardChainB', { event_concurrency: 'bus-serial' }) + const bus_c = new EventBus('ForwardChainC', { event_concurrency: 'bus-serial' }) + + const order_c: number[] = [] + + bus_b.on(OrderedEvent, async () => { + await sleep(2) + }) + + bus_c.on(OrderedEvent, async (event) => { + order_c.push(event.order) + await sleep(1) + }) + + bus_a.on('*', bus_b.dispatch) + bus_b.on('*', bus_c.dispatch) + + for (let i = 0; i < 6; i += 1) { + bus_a.dispatch(OrderedEvent({ order: i })) + } + + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + await bus_c.waitUntilIdle() + + assert.deepEqual(order_c, [0, 1, 2, 3, 4, 5]) +}) + +test('find: past returns most recent completed event (bus-scoped)', async () => { + const DebounceEvent = BaseEvent.extend('FindPastEvent', { value: z.number() }) + const bus = new EventBus('FindPastBus') + + bus.on(DebounceEvent, async () => {}) + + bus.dispatch(DebounceEvent({ value: 1 })) + bus.dispatch(DebounceEvent({ value: 2 })) + + await bus.waitUntilIdle() + + const found = await bus.find(DebounceEvent, { past: true, future: false }) + assert.ok(found) + assert.equal(found.value, 2) + assert.equal(found.event_status, 'completed') + assert.ok(found.bus) + assert.equal(found.bus.name, 'FindPastBus') + assert.equal(typeof found.bus.dispatch, 
'function') +}) + +test('find: future returns in-flight event and done waits', async () => { + const DebounceEvent = BaseEvent.extend('FindFutureEvent', { value: z.number() }) + const bus = new EventBus('FindFutureBus') + const { promise, resolve } = withResolvers() + + bus.on(DebounceEvent, async () => { + await promise + }) + + bus.dispatch(DebounceEvent({ value: 1 })) + + const found = await bus.find(DebounceEvent, { past: false, future: true }) + assert.ok(found) + assert.equal(found.value, 1) + assert.ok(found.event_status !== 'completed') + assert.ok(found.bus) + assert.equal(found.bus.name, 'FindFutureBus') + + resolve() + const completed = await found.done() + assert.equal(completed.event_status, 'completed') +}) + +test('find: future waits for next event when none in-flight', async () => { + const DebounceEvent = BaseEvent.extend('FindWaitEvent', { value: z.number() }) + const bus = new EventBus('FindWaitBus') + + bus.on(DebounceEvent, async () => {}) + + setTimeout(() => { + bus.dispatch(DebounceEvent({ value: 99 })) + }, 10) + + const found = await bus.find(DebounceEvent, { past: false, future: 0.2 }) + assert.ok(found) + assert.equal(found.value, 99) + assert.ok(found.bus) + assert.equal(found.bus.name, 'FindWaitBus') + await found.done() +}) + +test('find: most recent wins across completed and in-flight', async () => { + const DebounceEvent = BaseEvent.extend('FindMostRecentEvent', { value: z.number() }) + const bus = new EventBus('FindMostRecentBus') + const { promise, resolve } = withResolvers() + + bus.on(DebounceEvent, async (event) => { + if (event.value === 2) { + await promise + } + }) + + bus.dispatch(DebounceEvent({ value: 1 })) + await bus.waitUntilIdle() + + bus.dispatch(DebounceEvent({ value: 2 })) + + const found = await bus.find(DebounceEvent, { past: true, future: true }) + assert.ok(found) + assert.equal(found.value, 2) + assert.ok(found.event_status !== 'completed') + + resolve() + await found.done() +}) diff --git 
a/bubus-ts/tests/log_tree.test.ts b/bubus-ts/tests/log_tree.test.ts new file mode 100644 index 0000000..535a26f --- /dev/null +++ b/bubus-ts/tests/log_tree.test.ts @@ -0,0 +1,227 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus, EventResult } from '../src/index.js' +import { EventHandler } from '../src/event_handler.js' +import type { EventHandlerFunction } from '../src/types.js' + +const RootEvent = BaseEvent.extend('RootEvent', { data: z.string().optional() }) +const ChildEvent = BaseEvent.extend('ChildEvent', { value: z.number().optional() }) +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', { nested: z.record(z.number()).optional() }) + +class ValueError extends Error { + constructor(message: string) { + super(message) + this.name = 'ValueError' + } +} + +const createHandlerEntry = (bus: EventBus, handler_id: string, handler_name: string, event_key: string): EventHandler => { + const handler: EventHandlerFunction = () => undefined + const { isostring: handler_registered_at, ts: handler_registered_ts } = BaseEvent.nextTimestamp() + return new EventHandler({ + id: handler_id, + handler, + handler_name, + handler_timeout: bus.event_timeout_default, + handler_registered_at, + handler_registered_ts, + event_key, + eventbus_name: bus.name, + }) +} + +test('logTree: single event', () => { + const bus = new EventBus('SingleBus') + + const event = RootEvent({ data: 'test' }) + event.event_status = 'completed' + event.event_completed_at = event.event_created_at + + bus.event_history.set(event.event_id, event) + + const output = bus.logTree() + + assert.ok(output.includes('└── βœ… RootEvent#')) + assert.ok(output.includes('[') && output.includes(']')) +}) + +test('logTree: with handler results', () => { + const bus = new EventBus('HandlerBus') + + const event = RootEvent({ data: 'test' }) + event.event_status = 'completed' + event.event_completed_at = event.event_created_at 
+ + const handler_id = 'handler-1' + const result = new EventResult({ + event, + handler: createHandlerEntry(bus, handler_id, 'test_handler', event.event_type), + }) + result.markStarted() + result.markCompleted('status: success') + event.event_results.set(handler_id, result) + + bus.event_history.set(event.event_id, event) + + const output = bus.logTree() + + assert.ok(output.includes('└── βœ… RootEvent#')) + assert.ok(output.includes('HandlerBus.test_handler#')) + assert.ok(output.includes('"status: success"')) +}) + +test('logTree: with handler errors', () => { + const bus = new EventBus('ErrorBus') + + const event = RootEvent({ data: 'test' }) + event.event_status = 'completed' + event.event_completed_at = event.event_created_at + + const handler_id = 'handler-2' + const result = new EventResult({ + event, + handler: createHandlerEntry(bus, handler_id, 'error_handler', event.event_type), + }) + result.markStarted() + result.markError(new ValueError('Test error message')) + event.event_results.set(handler_id, result) + + bus.event_history.set(event.event_id, event) + + const output = bus.logTree() + + assert.ok(output.includes('ErrorBus.error_handler#')) + assert.ok(output.includes('ValueError: Test error message')) +}) + +test('logTree: complex nested', () => { + const bus = new EventBus('ComplexBus') + + const root = RootEvent({ data: 'root_data' }) + root.event_status = 'completed' + root.event_completed_at = root.event_created_at + + const root_handler_id = 'handler-root' + const root_result = new EventResult({ + event: root, + handler: createHandlerEntry(bus, root_handler_id, 'root_handler', root.event_type), + }) + root_result.markStarted() + root_result.markCompleted('Root processed') + root.event_results.set(root_handler_id, root_result) + + const child = ChildEvent({ value: 100 }) + child.event_parent_id = root.event_id + child.event_status = 'completed' + child.event_completed_at = child.event_created_at + root_result.event_children.push(child) + + 
const child_handler_id = 'handler-child' + const child_result = new EventResult({ + event: child, + handler: createHandlerEntry(bus, child_handler_id, 'child_handler', child.event_type), + }) + child_result.markStarted() + child_result.markCompleted([1, 2, 3]) + child.event_results.set(child_handler_id, child_result) + + const grandchild = GrandchildEvent({}) + grandchild.event_parent_id = child.event_id + grandchild.event_status = 'completed' + grandchild.event_completed_at = grandchild.event_created_at + child_result.event_children.push(grandchild) + + const grandchild_handler_id = 'handler-grandchild' + const grandchild_result = new EventResult({ + event: grandchild, + handler: createHandlerEntry(bus, grandchild_handler_id, 'grandchild_handler', grandchild.event_type), + }) + grandchild_result.markStarted() + grandchild_result.markCompleted(null) + grandchild.event_results.set(grandchild_handler_id, grandchild_result) + + bus.event_history.set(root.event_id, root) + bus.event_history.set(child.event_id, child) + bus.event_history.set(grandchild.event_id, grandchild) + + const output = bus.logTree() + + assert.ok(output.includes('βœ… RootEvent#')) + assert.ok(output.includes('βœ… ComplexBus.root_handler#')) + assert.ok(output.includes('βœ… ChildEvent#')) + assert.ok(output.includes('βœ… ComplexBus.child_handler#')) + assert.ok(output.includes('βœ… GrandchildEvent#')) + assert.ok(output.includes('βœ… ComplexBus.grandchild_handler#')) + assert.ok(output.includes('"Root processed"')) + assert.ok(output.includes('list(3 items)')) + assert.ok(output.includes('None')) +}) + +test('logTree: multiple roots', () => { + const bus = new EventBus('MultiBus') + + const root1 = RootEvent({ data: 'first' }) + root1.event_status = 'completed' + root1.event_completed_at = root1.event_created_at + + const root2 = RootEvent({ data: 'second' }) + root2.event_status = 'completed' + root2.event_completed_at = root2.event_created_at + + bus.event_history.set(root1.event_id, root1) + 
bus.event_history.set(root2.event_id, root2) + + const output = bus.logTree() + + assert.equal(output.split('β”œβ”€β”€ βœ… RootEvent#').length - 1, 1) + assert.equal(output.split('└── βœ… RootEvent#').length - 1, 1) +}) + +test('logTree: timing info', () => { + const bus = new EventBus('TimingBus') + + const event = RootEvent({}) + event.event_status = 'completed' + event.event_completed_at = event.event_created_at + + const handler_id = 'handler-time' + const result = new EventResult({ + event, + handler: createHandlerEntry(bus, handler_id, 'timed_handler', event.event_type), + }) + result.markStarted() + result.markCompleted('done') + event.event_results.set(handler_id, result) + + bus.event_history.set(event.event_id, event) + + const output = bus.logTree() + + assert.ok(output.includes('(')) + assert.ok(output.includes('s)')) +}) + +test('logTree: running handler', () => { + const bus = new EventBus('RunningBus') + + const event = RootEvent({}) + event.event_status = 'started' + + const handler_id = 'handler-running' + const result = new EventResult({ + event, + handler: createHandlerEntry(bus, handler_id, 'running_handler', event.event_type), + }) + result.markStarted() + event.event_results.set(handler_id, result) + + bus.event_history.set(event.event_id, event) + + const output = bus.logTree() + + assert.ok(output.includes('RunningBus.running_handler#')) + assert.ok(output.includes('RootEvent#')) +}) diff --git a/bubus-ts/tests/parent_child.test.ts b/bubus-ts/tests/parent_child.test.ts new file mode 100644 index 0000000..8470772 --- /dev/null +++ b/bubus-ts/tests/parent_child.test.ts @@ -0,0 +1,67 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus } from '../src/index.js' + +const ParentEvent = BaseEvent.extend('ParentEvent', {}) +const ChildEvent = BaseEvent.extend('ChildEvent', {}) +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', {}) +const UnrelatedEvent = 
BaseEvent.extend('UnrelatedEvent', {}) + +test('eventIsChildOf and eventIsParentOf work for direct children', async () => { + const bus = new EventBus('ParentChildBus') + + bus.on(ParentEvent, (event) => { + event.bus?.emit(ChildEvent({})) + }) + + const parent_event = bus.dispatch(ParentEvent({})) + await bus.waitUntilIdle() + + const child_event = Array.from(bus.event_history.values()).find((event) => event.event_type === 'ChildEvent') + assert.ok(child_event) + + assert.equal(child_event.event_parent_id, parent_event.event_id) + assert.equal(child_event.event_parent?.event_id, parent_event.event_id) + assert.equal(bus.eventIsChildOf(child_event, parent_event), true) + assert.equal(bus.eventIsParentOf(parent_event, child_event), true) +}) + +test('eventIsChildOf works for grandchildren', async () => { + const bus = new EventBus('GrandchildBus') + + bus.on(ParentEvent, (event) => { + event.bus?.emit(ChildEvent({})) + }) + + bus.on(ChildEvent, (event) => { + event.bus?.emit(GrandchildEvent({})) + }) + + const parent_event = bus.dispatch(ParentEvent({})) + await bus.waitUntilIdle() + + const child_event = Array.from(bus.event_history.values()).find((event) => event.event_type === 'ChildEvent') + const grandchild_event = Array.from(bus.event_history.values()).find((event) => event.event_type === 'GrandchildEvent') + + assert.ok(child_event) + assert.ok(grandchild_event) + + assert.equal(bus.eventIsChildOf(child_event, parent_event), true) + assert.equal(bus.eventIsChildOf(grandchild_event, parent_event), true) + assert.equal(child_event.event_parent?.event_id, parent_event.event_id) + assert.equal(grandchild_event.event_parent?.event_id, child_event.event_id) + assert.equal(bus.eventIsParentOf(parent_event, grandchild_event), true) +}) + +test('eventIsChildOf returns false for unrelated events', async () => { + const bus = new EventBus('UnrelatedBus') + + const parent_event = bus.dispatch(ParentEvent({})) + const unrelated_event = bus.dispatch(UnrelatedEvent({})) + 
await parent_event.done() + await unrelated_event.done() + + assert.equal(bus.eventIsChildOf(unrelated_event, parent_event), false) + assert.equal(bus.eventIsParentOf(parent_event, unrelated_event), false) +}) diff --git a/bubus-ts/tests/performance.test.ts b/bubus-ts/tests/performance.test.ts new file mode 100644 index 0000000..4e012e7 --- /dev/null +++ b/bubus-ts/tests/performance.test.ts @@ -0,0 +1,418 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' +import { z } from 'zod' + +import { BaseEvent, EventBus, EventHandlerTimeoutError, EventHandlerCancelledError } from '../src/index.js' + +const SimpleEvent = BaseEvent.extend('SimpleEvent', {}) + +const mb = (bytes: number) => (bytes / 1024 / 1024).toFixed(1) + +test('processes 50k events within reasonable time', { timeout: 30_000 }, async () => { + const total_events = 50_000 + // Keep full history to avoid trimming inflight events during perf runs. + const bus = new EventBus('PerfBus', { max_history_size: total_events }) + + let processed_count = 0 + bus.on(SimpleEvent, () => { + processed_count += 1 + }) + + global.gc?.() + const mem_before = process.memoryUsage() + + const t0 = Date.now() + + const pending: Array> = [] + for (let i = 0; i < total_events; i += 1) { + pending.push(bus.dispatch(SimpleEvent({}))) + } + + const t_dispatch = Date.now() + const mem_dispatch = process.memoryUsage() + + await Promise.all(pending.map((event) => event.done())) + await bus.waitUntilIdle() + + const t_done = Date.now() + const mem_done = process.memoryUsage() + + global.gc?.() + const mem_gc = process.memoryUsage() + + const dispatch_ms = t_dispatch - t0 + const await_ms = t_done - t_dispatch + const total_ms = t_done - t0 + + console.log( + `\n perf: ${total_events} events in ${total_ms}ms (${Math.round(total_events / (total_ms / 1000))}/s)` + + `\n dispatch: ${dispatch_ms}ms | await: ${await_ms}ms` + + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ dispatch=${mb(mem_dispatch.heapUsed)}MB 
β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB` + + `\n per-event: time=${(total_ms / total_events).toFixed(4)}ms | heap=${((mem_done.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB | heap_gc=${((mem_gc.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB` + + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + ) + + assert.equal(processed_count, total_events) + assert.ok(total_ms < 30_000, `Processing took ${total_ms}ms`) + assert.ok(bus.event_history.size <= bus.max_history_size!) + + bus.destroy() +}) + +// Simulates a fastify backend where each request creates its own bus with handlers, +// processes events, then tears down. Tests that bus creation/destruction at scale +// doesn't leak memory or degrade performance. +test('500 ephemeral buses with 100 events each', { timeout: 30_000 }, async () => { + const total_buses = 500 + const events_per_bus = 100 + const total_events = total_buses * events_per_bus + + let processed_count = 0 + + global.gc?.() + const mem_before = process.memoryUsage() + const t0 = Date.now() + + for (let b = 0; b < total_buses; b += 1) { + // Avoid trimming inflight events during perf runs. 
+ const bus = new EventBus(`ReqBus-${b}`, { max_history_size: events_per_bus }) + + bus.on(SimpleEvent, () => { + processed_count += 1 + }) + + const pending: Array> = [] + for (let i = 0; i < events_per_bus; i += 1) { + pending.push(bus.dispatch(SimpleEvent({}))) + } + + await Promise.all(pending.map((event) => event.done())) + await bus.waitUntilIdle() + + bus.destroy() + } + + const t_done = Date.now() + const mem_done = process.memoryUsage() + + global.gc?.() + const mem_gc = process.memoryUsage() + + const total_ms = t_done - t0 + + console.log( + `\n perf: ${total_buses} buses Γ— ${events_per_bus} events = ${total_events} total in ${total_ms}ms (${Math.round(total_events / (total_ms / 1000))}/s)` + + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB` + + `\n per-event: time=${(total_ms / total_events).toFixed(4)}ms | heap=${((mem_done.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB | heap_gc=${((mem_gc.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB` + + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + + `\n live bus instances: ${EventBus._all_instances.size}` + ) + + assert.equal(processed_count, total_events) + assert.ok(total_ms < 30_000, `Processing took ${total_ms}ms`) + // All buses should have been cleaned up from the registry + assert.equal(EventBus._all_instances.size, 0, 'All buses should be destroyed') +}) + +// Simulates per-request handler registration pattern: a shared bus where each +// "request" registers a handler with .on(), dispatches events, then removes the +// handler with .off(). Tests for handler map churn overhead and cleanup leaks. 
+test('50k events with ephemeral on/off handler registration across 2 buses', { timeout: 30_000 }, async () => { + const RequestEvent = BaseEvent.extend('RequestEvent', {}) + + const total_events = 50_000 + // Keep full history to avoid trimming inflight events during perf runs. + const bus_a = new EventBus('SharedBusA', { max_history_size: total_events }) + const bus_b = new EventBus('SharedBusB', { max_history_size: total_events }) + let processed_a = 0 + let processed_b = 0 + let on_ms = 0 + let off_ms = 0 + let dispatch_a_ms = 0 + let dispatch_b_ms = 0 + let done_ms = 0 + let process_a_ms = 0 + let process_b_ms = 0 + let handler_a_ms = 0 + let handler_b_ms = 0 + + // Persistent handler on bus_b that forwards count + bus_b.on(RequestEvent, () => { + processed_b += 1 + }) + + const bus_a_any = bus_a as any + const bus_b_any = bus_b as any + const original_process_a = typeof bus_a_any.processEvent === 'function' ? bus_a_any.processEvent.bind(bus_a) : null + const original_process_b = typeof bus_b_any.processEvent === 'function' ? bus_b_any.processEvent.bind(bus_b) : null + const original_run_handler_a = typeof bus_a_any.runEventHandler === 'function' ? bus_a_any.runEventHandler.bind(bus_a) : null + const original_run_handler_b = typeof bus_b_any.runEventHandler === 'function' ? 
bus_b_any.runEventHandler.bind(bus_b) : null + + if (original_process_a) { + bus_a_any.processEvent = async (event: any) => { + const t = performance.now() + try { + return await original_process_a(event) + } finally { + process_a_ms += performance.now() - t + } + } + } + if (original_process_b) { + bus_b_any.processEvent = async (event: any) => { + const t = performance.now() + try { + return await original_process_b(event) + } finally { + process_b_ms += performance.now() - t + } + } + } + if (original_run_handler_a) { + bus_a_any.runEventHandler = async (...args: any[]) => { + const t = performance.now() + try { + return await original_run_handler_a(...args) + } finally { + handler_a_ms += performance.now() - t + } + } + } + if (original_run_handler_b) { + bus_b_any.runEventHandler = async (...args: any[]) => { + const t = performance.now() + try { + return await original_run_handler_b(...args) + } finally { + handler_b_ms += performance.now() - t + } + } + } + + global.gc?.() + const mem_before = process.memoryUsage() + const t0 = Date.now() + + for (let i = 0; i < total_events; i += 1) { + // Register ephemeral handler + const ephemeral_handler = () => { + processed_a += 1 + } + let t = performance.now() + bus_a.on(RequestEvent, ephemeral_handler) + on_ms += performance.now() - t + + // Dispatch on bus_a, forward to bus_b + const event = RequestEvent({}) + t = performance.now() + const ev_a = bus_a.dispatch(event) + dispatch_a_ms += performance.now() - t + t = performance.now() + bus_b.dispatch(event) + dispatch_b_ms += performance.now() - t + + t = performance.now() + await ev_a.done() + done_ms += performance.now() - t + + // Tear down ephemeral handler + t = performance.now() + bus_a.off(RequestEvent, ephemeral_handler) + off_ms += performance.now() - t + } + + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + const t_done = Date.now() + const mem_done = process.memoryUsage() + + global.gc?.() + const mem_gc = process.memoryUsage() + + const 
total_ms = t_done - t0 + + console.log( + `\n perf: ${total_events} events with ephemeral on/off in ${total_ms}ms (${Math.round(total_events / (total_ms / 1000))}/s)` + + `\n dispatch: bus_a=${processed_a} | bus_b=${processed_b}` + + `\n timings: on=${on_ms.toFixed(0)}ms | off=${off_ms.toFixed(0)}ms | dispatch_a=${dispatch_a_ms.toFixed(0)}ms | dispatch_b=${dispatch_b_ms.toFixed(0)}ms | done=${done_ms.toFixed(0)}ms` + + `\n processing: bus_a=${process_a_ms.toFixed(0)}ms | bus_b=${process_b_ms.toFixed(0)}ms | handlers_a=${handler_a_ms.toFixed(0)}ms | handlers_b=${handler_b_ms.toFixed(0)}ms` + + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB` + + `\n per-event: time=${(total_ms / total_events).toFixed(4)}ms | heap=${((mem_done.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB | heap_gc=${((mem_gc.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB` + + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + + `\n bus_a handlers: ${bus_a.handlers.size} | bus_b handlers: ${bus_b.handlers.size}` + ) + + assert.equal(processed_a, total_events) + assert.equal(processed_b, total_events) + assert.ok(total_ms < 30_000, `Processing took ${total_ms}ms`) + // Ephemeral handlers should all be cleaned up + assert.equal(bus_a.handlers.size, 0, 'All ephemeral handlers should be removed from bus_a') + assert.equal(bus_b.handlers.size, 1, 'bus_b should still have its persistent handler') + assert.ok(bus_a.event_history.size <= bus_a.max_history_size!) + assert.ok(bus_b.event_history.size <= bus_b.max_history_size!) + + bus_a.destroy() + bus_b.destroy() +}) + +// Worst-case memory leak stress test. 
Exercises every retention path simultaneously: +// multi-bus forwarding, queue-jumping (done() inside handler), timeouts that cancel +// pending handlers, nested parent-child-grandchild trees, Proxy accumulation from +// getEventProxyScopedToThisBus, ephemeral on/off handler churn, and find() waiter timeouts. +// If any code path leaks references, memory will grow unbounded across 2000 iterations. +test('worst-case: forwarding + queue-jump + timeouts + cancellation at scale', { timeout: 60_000 }, async () => { + const ParentEvent = BaseEvent.extend('WC_Parent', { + iteration: z.number(), + }) + const ChildEvent = BaseEvent.extend('WC_Child', { + iteration: z.number(), + }) + const GrandchildEvent = BaseEvent.extend('WC_Grandchild', { + iteration: z.number(), + }) + + const total_iterations = 2000 + const history_limit = total_iterations * 2 + // Keep enough history to avoid trimming inflight events during perf runs. + const bus_a = new EventBus('WC_A', { max_history_size: history_limit }) + const bus_b = new EventBus('WC_B', { max_history_size: history_limit }) + const bus_c = new EventBus('WC_C', { max_history_size: history_limit }) + let parent_handled_a = 0 + let parent_handled_b = 0 + let child_handled_c = 0 + let grandchild_handled = 0 + let timeout_count = 0 + let cancel_count = 0 + + // Persistent handler on bus_b β€” just counts + bus_b.on(ParentEvent, () => { + parent_handled_b += 1 + }) + + // Persistent handler on bus_c β€” processes child, emits grandchild + bus_c.on(ChildEvent, async (event) => { + child_handled_c += 1 + const gc = event.bus?.emit(GrandchildEvent({ iteration: (event as any).iteration }))! + bus_c.dispatch(gc) + await gc.done() + }) + + // Persistent handler on bus_c for grandchild β€” slow on timeout iterations + // so the child's 5ms timeout fires while this is still sleeping. + // This creates EventHandlerTimeoutError β†’ EventHandlerCancelledError chains. 
+ // Sleep is 50ms but child timeout is 5ms β€” with cancellation of started handlers, + // the child completes immediately when timeout fires. Background sleep continues + // silently (JS can't cancel async functions, but the event system moves on). + bus_c.on(GrandchildEvent, async (event) => { + grandchild_handled += 1 + if ((event as any).iteration % 5 === 0) { + await new Promise((r) => setTimeout(r, 50)) + } + }) + + global.gc?.() + const mem_before = process.memoryUsage() + const t0 = Date.now() + + for (let i = 0; i < total_iterations; i += 1) { + const should_timeout = i % 5 === 0 + + // Ephemeral handler on bus_a β€” queue-jumps a child to bus_c + const ephemeral_handler = async (event: any) => { + parent_handled_a += 1 + const child_timeout = should_timeout ? 0.005 : null // 5ms timeout β†’ fires while grandchild sleeps 50ms + const child = event.bus?.emit( + ChildEvent({ + iteration: i, + event_timeout: child_timeout, + }) + )! + bus_c.dispatch(child) + try { + await child.done() + } catch { + // Swallow β€” timeout errors are expected + } + } + bus_a.on(ParentEvent, ephemeral_handler) + + // Dispatch parent to bus_a (with handler) and bus_b (forwarding) + const parent = ParentEvent({ iteration: i }) + const ev_a = bus_a.dispatch(parent) + bus_b.dispatch(parent) + + await ev_a.done() + // Don't waitUntilIdle on bus_c here β€” timed-out grandchild handlers are + // still sleeping in the background (JS can't cancel async functions). + // Let them pile up; the final waitUntilIdle() outside the loop will drain. 
+ + // Deregister ephemeral handler + bus_a.off(ParentEvent, ephemeral_handler) + + // Periodic find() with short timeout β€” exercises find_waiter cleanup + if (i % 10 === 0) { + // Don't await β€” let it timeout in the background + bus_a.find(ParentEvent, { future: 0.001 }) + } + } + + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + await bus_c.waitUntilIdle() + + // Count timeouts and cancellations from bus_c's history + for (const event of bus_c.event_history.values()) { + for (const result of event.event_results.values()) { + if (result.error instanceof EventHandlerTimeoutError) timeout_count += 1 + if (result.error instanceof EventHandlerCancelledError) cancel_count += 1 + } + } + + const t_done = Date.now() + const mem_done = process.memoryUsage() + + global.gc?.() + // Short delay to let find() timeouts and timed-out handler promises settle + await new Promise((r) => setTimeout(r, 50)) + global.gc?.() + const mem_gc = process.memoryUsage() + + const total_ms = t_done - t0 + const estimated_events = total_iterations * 3 + const mem_delta_mb = (mem_gc.heapUsed - mem_before.heapUsed) / 1024 / 1024 + + console.log( + `\n worst-case: ${total_iterations} iterations in ${total_ms}ms (${Math.round(total_iterations / (total_ms / 1000))}/s)` + + `\n parent: bus_a=${parent_handled_a} bus_b=${parent_handled_b}` + + `\n child: bus_c=${child_handled_c} | grandchild=${grandchild_handled}` + + `\n timeouts=${timeout_count} cancellations=${cancel_count}` + + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB (delta=${mem_delta_mb.toFixed(1)}MB)` + + `\n per-event (est): time=${(total_ms / estimated_events).toFixed(4)}ms | heap=${((mem_done.heapUsed - mem_before.heapUsed) / estimated_events / 1024).toFixed(2)}KB | heap_gc=${((mem_gc.heapUsed - mem_before.heapUsed) / estimated_events / 1024).toFixed(2)}KB` + + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ 
gc=${mb(mem_gc.rss)}MB` + + `\n history: a=${bus_a.event_history.size} b=${bus_b.event_history.size} c=${bus_c.event_history.size}` + + `\n handlers: a=${bus_a.handlers.size} b=${bus_b.handlers.size} c=${bus_c.handlers.size}` + + `\n instances: ${EventBus._all_instances.size}` + ) + + // All iterations processed + assert.equal(parent_handled_a, total_iterations) + assert.equal(parent_handled_b, total_iterations) + + // History bounded by max_history_size + assert.ok(bus_a.event_history.size <= history_limit, `bus_a history ${bus_a.event_history.size} > ${history_limit}`) + assert.ok(bus_b.event_history.size <= history_limit, `bus_b history ${bus_b.event_history.size} > ${history_limit}`) + assert.ok(bus_c.event_history.size <= history_limit, `bus_c history ${bus_c.event_history.size} > ${history_limit}`) + + // Ephemeral handlers all cleaned up + assert.equal(bus_a.handlers.size, 0, 'All ephemeral handlers removed from bus_a') + + // Memory should not grow unbounded β€” allow 50MB over baseline + assert.ok(mem_delta_mb < 50, `Memory grew ${mem_delta_mb.toFixed(1)}MB over baseline (limit 50MB)`) + + bus_a.destroy() + bus_b.destroy() + bus_c.destroy() + + assert.equal(EventBus._all_instances.size, 0, 'All buses destroyed') +}) diff --git a/bubus-ts/tests/retry.test.ts b/bubus-ts/tests/retry.test.ts new file mode 100644 index 0000000..711889f --- /dev/null +++ b/bubus-ts/tests/retry.test.ts @@ -0,0 +1,1186 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { + BaseEvent, + EventBus, + retry, + clearSemaphoreRegistry, + RetryTimeoutError, + SemaphoreTimeoutError, +} from '../src/index.js' + +const delay = (ms: number): Promise => new Promise((resolve) => setTimeout(resolve, ms)) + +// ─── Basic retry behavior ──────────────────────────────────────────────────── + +test('retry: function succeeds on first attempt with no retries needed', async () => { + const fn = retry({ max_attempts: 3 })(async () => 'ok') + assert.equal(await fn(), 
'ok') +}) + +test('retry: function retries on failure and eventually succeeds', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3 })(async () => { + calls++ + if (calls < 3) throw new Error(`fail ${calls}`) + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +test('retry: throws after exhausting all attempts', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3 })(async () => { + calls++ + throw new Error('always fails') + }) + await assert.rejects(fn, { message: 'always fails' }) + assert.equal(calls, 3) +}) + +test('retry: max_attempts=1 means no retries (single attempt)', async () => { + let calls = 0 + const fn = retry({ max_attempts: 1 })(async () => { + calls++ + throw new Error('fail') + }) + await assert.rejects(fn, { message: 'fail' }) + assert.equal(calls, 1) +}) + +test('retry: default max_attempts=1 means single attempt', async () => { + let calls = 0 + const fn = retry()(async () => { + calls++ + throw new Error('fail') + }) + await assert.rejects(fn, { message: 'fail' }) + assert.equal(calls, 1) +}) + +// ─── retry_after delay ─────────────────────────────────────────────────────── + +test('retry: retry_after introduces delay between attempts', async () => { + let calls = 0 + const timestamps: number[] = [] + const fn = retry({ max_attempts: 3, retry_after: 0.05 })(async () => { + calls++ + timestamps.push(performance.now()) + if (calls < 3) throw new Error('fail') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) + + // Check that delays were at least ~50ms between attempts + const gap1 = timestamps[1] - timestamps[0] + const gap2 = timestamps[2] - timestamps[1] + assert.ok(gap1 >= 40, `expected >=40ms gap, got ${gap1.toFixed(1)}ms`) + assert.ok(gap2 >= 40, `expected >=40ms gap, got ${gap2.toFixed(1)}ms`) +}) + +// ─── Exponential backoff ───────────────────────────────────────────────────── + +test('retry: retry_backoff_factor increases delay between attempts', 
async () => { + let calls = 0 + const timestamps: number[] = [] + const fn = retry({ max_attempts: 4, retry_after: 0.03, retry_backoff_factor: 2.0 })(async () => { + calls++ + timestamps.push(performance.now()) + if (calls < 4) throw new Error('fail') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 4) + + // Delays: 30ms, 60ms, 120ms (0.03 * 2^0, 0.03 * 2^1, 0.03 * 2^2) + const gap1 = timestamps[1] - timestamps[0] + const gap2 = timestamps[2] - timestamps[1] + const gap3 = timestamps[3] - timestamps[2] + + assert.ok(gap1 >= 20, `gap1=${gap1.toFixed(1)}ms, expected >=20ms`) + assert.ok(gap2 >= 45, `gap2=${gap2.toFixed(1)}ms, expected >=45ms (should be ~60ms)`) + assert.ok(gap3 >= 90, `gap3=${gap3.toFixed(1)}ms, expected >=90ms (should be ~120ms)`) + // Verify backoff is actually increasing + assert.ok(gap2 > gap1, 'gap2 should be larger than gap1') + assert.ok(gap3 > gap2, 'gap3 should be larger than gap2') +}) + +// ─── retry_on_errors filtering ─────────────────────────────────────────────── + +class NetworkError extends Error { + constructor(message: string = 'network error') { + super(message) + this.name = 'NetworkError' + } +} + +class ValidationError extends Error { + constructor(message: string = 'validation error') { + super(message) + this.name = 'ValidationError' + } +} + +test('retry: retry_on_errors retries only matching error types', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: [NetworkError] })(async () => { + calls++ + if (calls < 3) throw new NetworkError() + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +test('retry: retry_on_errors does not retry non-matching errors', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: [NetworkError] })(async () => { + calls++ + throw new ValidationError() + }) + await assert.rejects(fn, { name: 'ValidationError' }) + // Should have thrown immediately without retrying + 
assert.equal(calls, 1) +}) + +test('retry: retry_on_errors accepts string error name', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: ['NetworkError'] })(async () => { + calls++ + if (calls < 3) throw new NetworkError() + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +test('retry: retry_on_errors string matcher does not retry non-matching names', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: ['NetworkError'] })(async () => { + calls++ + throw new ValidationError() + }) + await assert.rejects(fn, { name: 'ValidationError' }) + assert.equal(calls, 1) +}) + +test('retry: retry_on_errors accepts RegExp pattern', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: [/network/i] })(async () => { + calls++ + if (calls < 3) throw new NetworkError('Network timeout occurred') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +test('retry: retry_on_errors RegExp does not retry non-matching errors', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: [/network/i] })(async () => { + calls++ + throw new ValidationError('bad input') + }) + await assert.rejects(fn, { name: 'ValidationError' }) + assert.equal(calls, 1) +}) + +test('retry: retry_on_errors mixes class, string, and RegExp matchers', async () => { + let calls = 0 + const fn = retry({ max_attempts: 5, retry_on_errors: [TypeError, 'NetworkError', /timeout/i] })(async () => { + calls++ + if (calls === 1) throw new TypeError('type error') + if (calls === 2) throw new NetworkError() + if (calls === 3) throw new Error('Connection timeout') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 4) +}) + +test('retry: retry_on_errors with multiple error types', async () => { + let calls = 0 + const fn = retry({ max_attempts: 5, retry_on_errors: [NetworkError, TypeError] })(async () => { + calls++ + if 
(calls === 1) throw new NetworkError() + if (calls === 2) throw new TypeError('type error') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +// ─── Per-attempt timeout ───────────────────────────────────────────────────── + +test('retry: timeout triggers RetryTimeoutError on slow attempts', async () => { + let calls = 0 + const fn = retry({ max_attempts: 1, timeout: 0.05 })(async () => { + calls++ + await delay(200) + return 'ok' + }) + await assert.rejects(fn, (error: unknown) => { + assert.ok(error instanceof RetryTimeoutError) + assert.equal(error.attempt, 1) + return true + }) + assert.equal(calls, 1) +}) + +test('retry: timeout allows fast attempts to succeed', async () => { + const fn = retry({ max_attempts: 1, timeout: 1 })(async () => { + await delay(5) + return 'fast' + }) + assert.equal(await fn(), 'fast') +}) + +test('retry: timed-out attempts are retried when max_attempts > 1', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, timeout: 0.05 })(async () => { + calls++ + if (calls < 3) { + await delay(200) // will timeout + return 'slow' + } + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +// ─── Semaphore concurrency control ────────────────────────────────────────── + +test('retry: semaphore_limit controls max concurrent executions', async (t) => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + const fn = retry({ max_attempts: 1, semaphore_limit: 2, semaphore_name: 'test_sem_limit' })(async () => { + active++ + max_active = Math.max(max_active, active) + await delay(50) + active-- + }) + + // Launch 6 concurrent calls β€” should only run 2 at a time + await Promise.all([fn(), fn(), fn(), fn(), fn(), fn()]) + assert.equal(max_active, 2, 'should never exceed semaphore_limit=2') +}) + +test('retry: semaphore_lax=false throws SemaphoreTimeoutError when slots are full', async () => { + clearSemaphoreRegistry() + + const fn = retry({ + max_attempts: 
1, + semaphore_limit: 1, + semaphore_name: 'test_sem_lax_false', + semaphore_lax: false, + semaphore_timeout: 0.05, + })(async () => { + await delay(200) // hold the semaphore for a while + return 'ok' + }) + + // Start one call to grab the semaphore + const first = fn() + + // Give the first call time to acquire the semaphore + await delay(10) + + // Second call should timeout trying to acquire semaphore + await assert.rejects( + fn(), + (error: unknown) => { + assert.ok(error instanceof SemaphoreTimeoutError) + assert.equal(error.semaphore_name, 'test_sem_lax_false') + return true + } + ) + + // Let the first call finish + assert.equal(await first, 'ok') +}) + +test('retry: semaphore_lax=true (default) proceeds without semaphore on timeout', async () => { + clearSemaphoreRegistry() + + let calls = 0 + const fn = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'test_sem_lax_true', + semaphore_lax: true, + semaphore_timeout: 0.05, + })(async () => { + calls++ + await delay(200) + return 'ok' + }) + + // Start first call to grab the semaphore + const first = fn() + await delay(10) + + // Second call should proceed anyway (lax mode) + const second = fn() + const results = await Promise.all([first, second]) + assert.deepEqual(results, ['ok', 'ok']) + assert.equal(calls, 2) +}) + +// ─── Preserves function metadata ───────────────────────────────────────────── + +test('retry: preserves function name', () => { + async function myNamedFunction(): Promise { + return 'ok' + } + const wrapped = retry()(myNamedFunction) + assert.equal(wrapped.name, 'myNamedFunction') +}) + +// ─── Preserves `this` context ──────────────────────────────────────────────── + +test('retry: preserves this context for methods', async () => { + class MyService { + value = 42 + fetch = retry({ max_attempts: 2 })(async function (this: MyService) { + return this.value + }) + } + + const svc = new MyService() + assert.equal(await svc.fetch(), 42) +}) + +// ─── Works with synchronous 
functions ──────────────────────────────────────── + +test('retry: wraps sync functions (result becomes a promise)', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3 })(() => { + calls++ + if (calls < 2) throw new Error('sync fail') + return 'sync ok' + }) + assert.equal(await fn(), 'sync ok') + assert.equal(calls, 2) +}) + +// ─── Integration with EventBus ─────────────────────────────────────────────── +// +// The recommended pattern is @retry() on the handler method + bus.on(Event, this.handler.bind(this)) +// These tests demonstrate the inline HOF form for simpler cases; the decorator form is tested below. + +test('retry: works as event bus handler wrapper (inline HOF)', async () => { + const bus = new EventBus('RetryBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('TestEvent', {}) + + let calls = 0 + bus.on( + TestEvent, + retry({ max_attempts: 3 })(async (_event) => { + calls++ + if (calls < 3) throw new Error(`handler fail ${calls}`) + return 'handler ok' + }) + ) + + const event = bus.dispatch(TestEvent({})) + await event.done() + + assert.equal(calls, 3) + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'handler ok') +}) + +test('retry: bus handler with retry_on_errors only retries matching errors (inline HOF)', async () => { + const bus = new EventBus('RetryFilterBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('TestEvent', {}) + + let calls = 0 + bus.on( + TestEvent, + retry({ max_attempts: 3, retry_on_errors: [NetworkError] })(async (_event) => { + calls++ + throw new ValidationError() + }) + ) + + const event = bus.dispatch(TestEvent({})) + await event.done() + + // Should have failed immediately without retrying + assert.equal(calls, 1) + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') +}) + +// ─── Edge cases 
────────────────────────────────────────────────────────────── + +test('retry: max_attempts=0 is treated as 1 (minimum)', async () => { + let calls = 0 + const fn = retry({ max_attempts: 0 })(async () => { + calls++ + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 1) +}) + +test('retry: passes arguments through to wrapped function', async () => { + const fn = retry({ max_attempts: 1 })(async (a: number, b: string) => `${a}-${b}`) + assert.equal(await fn(1, 'hello'), '1-hello') +}) + +test('retry: semaphore is held across all retry attempts', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + let total_calls = 0 + + const fn = retry({ + max_attempts: 3, + semaphore_limit: 1, + semaphore_name: 'test_sem_across_retries', + })(async () => { + active++ + max_active = Math.max(max_active, active) + total_calls++ + await delay(10) + active-- + // Odd calls fail, even calls succeed β€” each invocation needs 2 attempts + if (total_calls % 2 === 1) throw new Error('fail') + return 'ok' + }) + + // Run 3 calls concurrently β€” they should run serially because semaphore_limit=1 + // The semaphore should be held across retries, so only 1 active at a time + const results = await Promise.all([fn(), fn(), fn()]) + assert.equal(max_active, 1, 'semaphore should enforce serial execution even during retries') + assert.deepEqual(results, ['ok', 'ok', 'ok']) + assert.equal(total_calls, 6, 'each of 3 calls should have taken 2 attempts') +}) + +test('retry: semaphore released even when all attempts fail', async () => { + clearSemaphoreRegistry() + + const fn = retry({ + max_attempts: 2, + semaphore_limit: 1, + semaphore_name: 'test_sem_release_on_fail', + })(async () => { + throw new Error('always fails') + }) + + // First call fails, should release semaphore + await assert.rejects(fn) + + // Second call should be able to acquire the semaphore (not deadlocked) + await assert.rejects(fn) +}) + +// ─── TC39 decorator syntax on class 
methods ────────────────────────────── + +test('retry: works on class method via manual wrapping pattern', async () => { + // Since TC39 Stage 3 decorators require experimentalDecorators or TS 5.0+ native support, + // we test the equivalent pattern: applying retry() to a method post-definition. + class ApiClient { + base_url = 'https://example.com' + calls = 0 + + fetchData = retry({ max_attempts: 3 })(async function (this: ApiClient) { + this.calls++ + if (this.calls < 3) throw new Error('api error') + return `data from ${this.base_url}` + }) + } + + const client = new ApiClient() + assert.equal(await client.fetchData(), 'data from https://example.com') + assert.equal(client.calls, 3) +}) + +// ─── Re-entrancy / deadlock prevention ─────────────────────────────────────── + +test('retry: re-entrant call on same semaphore does not deadlock', async () => { + clearSemaphoreRegistry() + + const inner = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'shared_sem', + })(async () => { + return 'inner ok' + }) + + const outer = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'shared_sem', + })(async () => { + // This would deadlock without re-entrancy tracking: + // outer holds the semaphore, inner tries to acquire the same one + const result = await inner() + return `outer got: ${result}` + }) + + assert.equal(await outer(), 'outer got: inner ok') +}) + +test('retry: recursive function with semaphore does not deadlock', async () => { + clearSemaphoreRegistry() + + let depth = 0 + const recurse: (n: number) => Promise<number> = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'recursive_sem', + })(async (n: number): Promise<number> => { + depth++ + if (n <= 1) return 1 + return n + (await recurse(n - 1)) + }) + + const result = await recurse(5) + assert.equal(result, 15) // 5 + 4 + 3 + 2 + 1 + assert.equal(depth, 5) +}) + +test('retry: different semaphore names do not interfere with re-entrancy', async () => { + 
clearSemaphoreRegistry() + + let inner_active = 0 + let inner_max_active = 0 + + const inner = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'inner_sem', + })(async () => { + inner_active++ + inner_max_active = Math.max(inner_max_active, inner_active) + await delay(20) + inner_active-- + return 'inner ok' + }) + + const outer = retry({ + max_attempts: 1, + semaphore_limit: 2, + semaphore_name: 'outer_sem', + })(async () => { + return await inner() + }) + + // Run 3 outer calls concurrently + // outer_sem allows 2 concurrent, but inner_sem only allows 1 + const results = await Promise.all([outer(), outer(), outer()]) + assert.deepEqual(results, ['inner ok', 'inner ok', 'inner ok']) + assert.equal(inner_max_active, 1, 'inner semaphore should still enforce limit=1') +}) + +test('retry: three-level nested re-entrancy does not deadlock', async () => { + clearSemaphoreRegistry() + + const level3 = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'nested_sem', + })(async () => 'level3') + + const level2 = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'nested_sem', + })(async () => { + const r = await level3() + return `level2>${r}` + }) + + const level1 = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'nested_sem', + })(async () => { + const r = await level2() + return `level1>${r}` + }) + + assert.equal(await level1(), 'level1>level2>level3') +}) + +// ─── Semaphore scope ───────────────────────────────────────────────────────── + +test('retry: semaphore_scope=class shares semaphore across instances of same class', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Worker { + run = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'work', + })(async function (this: Worker) { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'done' + }) + } + + const a = new Worker() 
+ const b = new Worker() + const c = new Worker() + + await Promise.all([a.run(), b.run(), c.run()]) + assert.equal(max_active, 1, 'class scope: all instances should share one semaphore') +}) + +test('retry: semaphore_scope=instance gives each instance its own semaphore', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Worker { + run = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'instance', + semaphore_name: 'work', + })(async function (this: Worker) { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'done' + }) + } + + const a = new Worker() + const b = new Worker() + + // Same instance: serialized (limit=1 per instance) + // Different instances: can run in parallel (separate semaphores) + await Promise.all([a.run(), b.run()]) + assert.equal(max_active, 2, 'instance scope: different instances should get separate semaphores') +}) + +test('retry: semaphore_scope=instance serializes calls on same instance', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Worker { + run = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'instance', + semaphore_name: 'work', + })(async function (this: Worker) { + active++ + max_active = Math.max(max_active, active) + await delay(20) + active-- + return 'done' + }) + } + + const a = new Worker() + await Promise.all([a.run(), a.run(), a.run()]) + assert.equal(max_active, 1, 'instance scope: same instance calls should serialize') +}) + +test('retry: semaphore_scope=class isolates different classes', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Alpha { + run = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'run', + })(async function (this: Alpha) { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + }) + } + + class Beta { + run = retry({ + 
max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'run', + })(async function (this: Beta) { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + }) + } + + await Promise.all([new Alpha().run(), new Beta().run()]) + assert.equal(max_active, 2, 'class scope: different classes should get separate semaphores') +}) + +// ─── TC39 Stage 3 decorator syntax (RECOMMENDED PATTERN) ──────────────────── +// +// The primary supported pattern for event bus handlers is: +// +// class Service { +// constructor(bus) { +// bus.on(Event, this.on_Event.bind(this)) +// } +// +// @retry({ max_attempts: 3, ... }) +// async on_Event(event) { ... } +// } +// +// Retry/timeout is a handler-level concern. Event processing itself has no error +// state β€” only individual handlers produce errors/timeouts that need retrying. +// Event-level and handler-level concurrency on the bus is still controllable via +// event_concurrency / event_handler_concurrency options (those are separate). 
+ +test('retry: @retry() TC39 decorator on class method retries on failure', async () => { + clearSemaphoreRegistry() + + class ApiService { + calls = 0 + + @retry({ max_attempts: 3 }) + async fetchData(): Promise { + this.calls++ + if (this.calls < 3) throw new Error('api error') + return 'data' + } + } + + const svc = new ApiService() + assert.equal(await svc.fetchData(), 'data') + assert.equal(svc.calls, 3) +}) + +test('retry: @retry() TC39 decorator preserves this context', async () => { + class Config { + endpoint = 'https://api.example.com' + + @retry({ max_attempts: 2 }) + async getEndpoint(): Promise { + return this.endpoint + } + } + + const cfg = new Config() + assert.equal(await cfg.getEndpoint(), 'https://api.example.com') +}) + +test('retry: @retry() TC39 decorator with semaphore_scope=class', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Service { + @retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'handle', + }) + async handle(): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + } + } + + const a = new Service() + const b = new Service() + await Promise.all([a.handle(), b.handle()]) + assert.equal(max_active, 1, '@retry class scope: all instances share one semaphore') +}) + +test('retry: @retry() TC39 decorator with semaphore_scope=instance', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Service { + @retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'instance', + semaphore_name: 'handle', + }) + async handle(): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + } + } + + const a = new Service() + const b = new Service() + await Promise.all([a.handle(), b.handle()]) + assert.equal(max_active, 2, '@retry instance scope: different instances get separate semaphores') +}) + +test('retry: 
@retry() decorated method works with bus.on via bind', async () => { + const bus = new EventBus('DecoratorBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('TestEvent', {}) + + class Handler { + calls = 0 + + @retry({ max_attempts: 3 }) + async onTest(_event: InstanceType): Promise { + this.calls++ + if (this.calls < 3) throw new Error('handler fail') + return 'handler ok' + } + } + + const handler = new Handler() + bus.on(TestEvent, handler.onTest.bind(handler)) + + const event = bus.dispatch(TestEvent({})) + await event.done() + assert.equal(handler.calls, 3) + const result = Array.from(event.event_results.values())[0] + assert.equal(result.result, 'handler ok') +}) + +// ─── Scope fallback to global ─────────────────────────────────────────────── + +test('retry: semaphore_scope=class falls back to global for standalone functions', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + const fn = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'standalone_class', + })(async () => { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + }) + + // Two concurrent calls should serialize since they share the same global-fallback semaphore + const results = await Promise.all([fn(), fn()]) + assert.deepEqual(results, ['ok', 'ok']) + assert.equal(max_active, 1, 'class scope on standalone fn should fall back to global and serialize') +}) + +test('retry: semaphore_scope=instance falls back to global for standalone functions', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + const fn = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'instance', + semaphore_name: 'standalone_instance', + })(async () => { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + }) + + // Two concurrent calls should serialize since they share the same 
global-fallback semaphore + const results = await Promise.all([fn(), fn()]) + assert.deepEqual(results, ['ok', 'ok']) + assert.equal(max_active, 1, 'instance scope on standalone fn should fall back to global and serialize') +}) + +// ─── @retry() decorator + bus.on via .bind(this) β€” all three scopes ───────── + +test('retry: @retry(scope=class) + bus.on via .bind β€” serializes across instances', async () => { + clearSemaphoreRegistry() + + const bus = new EventBus('ScopeClassBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const SomeEvent = BaseEvent.extend('ScopeClassEvent', {}) + + let active = 0 + let max_active = 0 + + class SomeService { + constructor(b: InstanceType) { + b.on(SomeEvent, this.on_SomeEvent.bind(this)) + } + + @retry({ max_attempts: 1, semaphore_scope: 'class', semaphore_limit: 1, semaphore_name: 'on_SomeEvent' }) + async on_SomeEvent(_event: InstanceType): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + } + } + + // Two instances register handlers on the same bus + // Small delay between registrations to ensure unique handler IDs (bus uses ms-precision timestamps in handler ID hash) + new SomeService(bus) + await delay(2) + new SomeService(bus) + + const event = bus.dispatch(SomeEvent({})) + await event.done() + + // class scope + limit=1: only 1 handler should run at a time across both instances + assert.equal(max_active, 1, 'class scope should serialize across instances') +}) + +test('retry: @retry(scope=instance) + bus.on via .bind β€” isolates per instance', async () => { + const bus = new EventBus('ScopeInstanceBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const SomeEvent = BaseEvent.extend('ScopeInstanceEvent', {}) + + let active = 0 + let max_active = 0 + + class SomeService { + constructor(b: InstanceType) { + b.on(SomeEvent, this.on_SomeEvent.bind(this)) + } + + @retry({ max_attempts: 1, semaphore_scope: 'instance', 
semaphore_limit: 1, semaphore_name: 'on_SomeEvent_inst' }) + async on_SomeEvent(_event: InstanceType): Promise { + active++ + max_active = Math.max(max_active, active) + total_calls++ + await delay(200) + active-- + return 'ok' + } + } + + let total_calls = 0 + + // Two instances register handlers β€” each gets its own semaphore + // Small delay between registrations to ensure unique handler IDs (bus uses ms-precision timestamps in handler ID hash) + new SomeService(bus) + await delay(2) + new SomeService(bus) + + const event = bus.dispatch(SomeEvent({})) + await event.done() + + // instance scope: 2 different instances can run in parallel + assert.equal(total_calls, 2, 'both handlers should have run') + assert.equal(max_active, 2, `instance scope should allow different instances to run in parallel (got max_active=${max_active}, total_calls=${total_calls})`) +}) + +test('retry: @retry(scope=global) + bus.on via .bind β€” all calls share one semaphore', async () => { + clearSemaphoreRegistry() + + const bus = new EventBus('ScopeGlobalBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const SomeEvent = BaseEvent.extend('ScopeGlobalEvent', {}) + + let active = 0 + let max_active = 0 + + class SomeService { + constructor(b: InstanceType) { + b.on(SomeEvent, this.on_SomeEvent.bind(this)) + } + + @retry({ max_attempts: 1, semaphore_scope: 'global', semaphore_limit: 1, semaphore_name: 'on_SomeEvent' }) + async on_SomeEvent(_event: InstanceType): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + } + } + + // Small delay between registrations to ensure unique handler IDs + new SomeService(bus) + await delay(2) + new SomeService(bus) + + const event = bus.dispatch(SomeEvent({})) + await event.done() + + // global scope: all calls serialized + assert.equal(max_active, 1, 'global scope should serialize all calls') +}) + +// ─── HOF pattern: retry({...})(fn).bind(instance) β€” alternative to 
decorator ─ + +test('retry: HOF retry()(fn).bind(instance) β€” instance scope works when bind is after wrap', async () => { + clearSemaphoreRegistry() + + const bus = new EventBus('HOFBindBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const SomeEvent = BaseEvent.extend('HOFBindEvent', {}) + + let active = 0 + let max_active = 0 + + const some_instance_a = { name: 'a' } + const some_instance_b = { name: 'b' } + + const handler = retry({ + max_attempts: 1, + semaphore_scope: 'instance', + semaphore_limit: 1, + semaphore_name: 'handler', + })(async function (this: any, _event: InstanceType): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + }) + + // bind AFTER wrapping β†’ wrapper receives correct `this` for scoping + bus.on(SomeEvent, handler.bind(some_instance_a)) + bus.on(SomeEvent, handler.bind(some_instance_b)) + + const event = bus.dispatch(SomeEvent({})) + await event.done() + + // Two different instances β†’ separate semaphores β†’ can run in parallel + assert.equal(max_active, 2, 'bind-after-wrap: different instances should run in parallel') +}) + +// ─── HOF pattern: retry({...})(fn.bind(instance)) β€” bind BEFORE wrapping ──── +// NOTE: This falls back to global scope because JS cannot extract [[BoundThis]] +// from a bound function. The handler works correctly (this is preserved inside +// the handler), but the semaphore scoping cannot see the bound instance. +// Recommendation: use retry({...})(fn).bind(instance) instead. 
 + +test('retry: HOF retry()(fn.bind(instance)) — scope falls back to global (bind before wrap)', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + const instance_a = { name: 'a' } + const instance_b = { name: 'b' } + + const make_handler = (inst: object) => + retry({ + max_attempts: 1, + semaphore_scope: 'instance', + semaphore_limit: 1, + semaphore_name: 'handler_bind_before', + })( + (async function (this: any, _event: any): Promise<string> { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + }).bind(inst) + ) + + const handler_a = make_handler(instance_a) + const handler_b = make_handler(instance_b) + + // Both handlers fall back to global scope (same semaphore), so they serialize + await Promise.all([handler_a('event1'), handler_b('event2')]) + assert.equal(max_active, 1, 'bind-before-wrap: scoping falls back to global (serialized)') +}) + +// ─── retry wrapping emit→done (TECHNICALLY SUPPORTED, NOT RECOMMENDED) ────── +// +// This pattern wraps an entire emit→done cycle in retry(), so each retry +// dispatches a brand new event. It works, but is discouraged because: +// +// 1. Architecture: retry/timeout belongs on the handler, not the emit site. +// The emitter doesn't know which handler failed or why — the handler does. +// +// 2. Replayability: each retry produces a separate event in the log, making +// replays non-deterministic. If the original run needed 3 attempts, a replay +// that succeeds on attempt 1 produces a different event topology. +// +// 3. Determinism: the same emit may reach different handlers with different +// failure modes; retrying the whole dispatch is a blunt instrument. +// +// Prefer: @retry() on the handler method, so retries are transparent to the +// event log and controlled by the code that understands the failure. 
+ +test('retry: retry wrapping emitβ†’done retries the full dispatch cycle (discouraged pattern)', async () => { + const bus = new EventBus('RetryEmitBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + + const TabsEvent = BaseEvent.extend('TabsEvent', {}) + const DOMEvent = BaseEvent.extend('DOMEvent', {}) + const ScreenshotEvent = BaseEvent.extend('ScreenshotEvent', {}) + + let tabs_attempts = 0 + let dom_calls = 0 + let screenshot_calls = 0 + + bus.on(TabsEvent, async (_event) => { + tabs_attempts++ + if (tabs_attempts < 3) throw new Error(`tabs fail attempt ${tabs_attempts}`) + return 'tabs ok' + }) + + bus.on(DOMEvent, async (_event) => { + dom_calls++ + return 'dom ok' + }) + + bus.on(ScreenshotEvent, async (_event) => { + screenshot_calls++ + return 'screenshot ok' + }) + + const [tabs_event, dom_event, screenshot_event] = await Promise.all([ + // retry wraps the full emitβ†’done cycle β€” each retry dispatches a fresh event + retry({ max_attempts: 4 })(async () => { + const event = bus.emit(TabsEvent({})) + await event.done() + if (event.event_errors.length) throw event.event_errors[0] + return event + })(), + + // these two race in parallel alongside the retrying tabs event + bus.emit(DOMEvent({})).done(), + bus.emit(ScreenshotEvent({})).done(), + ]) + + // tabs needed 3 attempts (2 failures + 1 success) + assert.equal(tabs_attempts, 3) + assert.equal(tabs_event.event_status, 'completed') + + // dom and screenshot ran once each, in parallel with the tabs retries + assert.equal(dom_calls, 1) + assert.equal(screenshot_calls, 1) + assert.equal(dom_event.event_status, 'completed') + assert.equal(screenshot_event.event_status, 'completed') +}) diff --git a/bubus-ts/tests/timeout.test.ts b/bubus-ts/tests/timeout.test.ts new file mode 100644 index 0000000..c584110 --- /dev/null +++ b/bubus-ts/tests/timeout.test.ts @@ -0,0 +1,1319 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus, 
EventHandlerCancelledError, EventHandlerAbortedError, EventHandlerTimeoutError } from '../src/index.js' +import { LockManager } from '../src/lock_manager.js' + +const TimeoutEvent = BaseEvent.extend('TimeoutEvent', {}) + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +test('handler timeout marks EventResult as error', async () => { + const bus = new EventBus('TimeoutBus') + + bus.on(TimeoutEvent, async () => { + await delay(50) + return 'slow' + }) + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof EventHandlerTimeoutError) +}) + +test('handler completes within timeout', async () => { + const bus = new EventBus('TimeoutOkBus') + + bus.on(TimeoutEvent, async () => { + await delay(5) + return 'fast' + }) + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.5 })) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'fast') +}) + +test('event handler errors expose event_result, cause, and timeout metadata', async () => { + const bus = new EventBus('ErrorMetadataBus') + + const ParentCancelEvent = BaseEvent.extend('ParentCancelEvent', {}) + const PendingChildEvent = BaseEvent.extend('PendingChildEvent', {}) + const ParentAbortEvent = BaseEvent.extend('ParentAbortEvent', {}) + const AbortChildEvent = BaseEvent.extend('AbortChildEvent', {}) + + bus.on(TimeoutEvent, async () => { + await delay(40) + return 'slow' + }) + + bus.on(PendingChildEvent, async () => { + await delay(5) + return 'pending_child' + }) + + let pending_child: BaseEvent | null = null + bus.on(ParentCancelEvent, async (event) => { + pending_child = event.bus?.emit(PendingChildEvent({ event_timeout: 0.5 })) ?? 
null + await delay(80) + }) + + bus.on(AbortChildEvent, async () => { + await delay(120) + return 'abort_child' + }) + + let aborted_child: BaseEvent | null = null + bus.on(ParentAbortEvent, async (event) => { + aborted_child = event.bus?.emit(AbortChildEvent({ event_timeout: 0.5 })) ?? null + await aborted_child?.done() + }) + + const timeout_event = bus.dispatch(TimeoutEvent({ event_timeout: 0.02 })) + await timeout_event.done() + + const timeout_result = Array.from(timeout_event.event_results.values())[0] + const timeout_error = timeout_result.error as EventHandlerTimeoutError + assert.ok(timeout_error.cause instanceof Error) + assert.equal(timeout_error.cause.name, 'TimeoutError') + assert.equal(timeout_error.event_result, timeout_result) + assert.equal(timeout_error.timeout_seconds, timeout_event.event_timeout) + assert.equal(timeout_error.event.event_id, timeout_event.event_id) + assert.equal(timeout_error.event_type, timeout_event.event_type) + assert.equal(timeout_error.handler_name, timeout_result.handler_name) + assert.equal(timeout_error.handler_id, timeout_result.handler_id) + assert.equal(timeout_error.event_timeout, timeout_event.event_timeout) + + const cancel_parent = bus.dispatch(ParentCancelEvent({ event_timeout: 0.02 })) + await cancel_parent.done() + await bus.waitUntilIdle() + + assert.ok(pending_child, 'pending_child should have been emitted') + const pending_result = Array.from(pending_child!.event_results.values())[0] + const cancelled_error = pending_result.error as EventHandlerCancelledError + const cancel_parent_result = Array.from(cancel_parent.event_results.values())[0] + const cancel_parent_error = cancel_parent_result.error as EventHandlerTimeoutError + assert.equal(cancelled_error.cause, cancel_parent_error) + assert.equal(cancelled_error.event_result, pending_result) + assert.equal(cancelled_error.event.event_id, pending_child!.event_id) + assert.equal(cancelled_error.timeout_seconds, pending_child!.event_timeout) + 
assert.equal(cancelled_error.event_type, pending_child!.event_type) + assert.equal(cancelled_error.handler_name, pending_result.handler_name) + assert.equal(cancelled_error.handler_id, pending_result.handler_id) + + const abort_parent = bus.dispatch(ParentAbortEvent({ event_timeout: 0.05 })) + await abort_parent.done() + await bus.waitUntilIdle() + + assert.ok(aborted_child, 'aborted_child should have been emitted') + const aborted_result = Array.from(aborted_child!.event_results.values())[0] + const aborted_error = aborted_result.error as EventHandlerAbortedError + const abort_parent_result = Array.from(abort_parent.event_results.values())[0] + const abort_parent_error = abort_parent_result.error as EventHandlerTimeoutError + assert.equal(aborted_error.cause, abort_parent_error) + assert.equal(aborted_error.event_result, aborted_result) + assert.equal(aborted_error.event.event_id, aborted_child!.event_id) + assert.equal(aborted_error.timeout_seconds, aborted_child!.event_timeout) + assert.equal(aborted_error.event_type, aborted_child!.event_type) + assert.equal(aborted_error.handler_name, aborted_result.handler_name) + assert.equal(aborted_error.handler_id, aborted_result.handler_id) +}) + +test('handler timeouts fire across concurrency modes', async () => { + const modes = ['global-serial', 'bus-serial', 'parallel'] as const + + for (const event_mode of modes) { + for (const handler_mode of modes) { + const bus = new EventBus(`Timeout-${event_mode}-${handler_mode}`, { + event_concurrency: event_mode, + event_handler_concurrency: handler_mode, + }) + + bus.on(TimeoutEvent, async () => { + await delay(50) + return 'slow' + }) + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error', `Expected timeout error for event=${event_mode} handler=${handler_mode}`) + assert.ok( + result.error instanceof EventHandlerTimeoutError, + `Expected 
EventHandlerTimeoutError for event=${event_mode} handler=${handler_mode}` + ) + + await bus.waitUntilIdle() + } + } +}) + +test('timeout still marks event failed when other handlers finish', async () => { + const bus = new EventBus('TimeoutParallelHandlers', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + + const results: string[] = [] + + bus.on(TimeoutEvent, async () => { + await delay(1) + results.push('fast') + return 'fast' + }) + + bus.on(TimeoutEvent, async () => { + await delay(50) + results.push('slow') + return 'slow' + }) + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() + + const statuses = Array.from(event.event_results.values()).map((result) => result.status) + assert.ok(statuses.includes('completed')) + assert.ok(statuses.includes('error')) + assert.equal(event.event_status, 'completed') + assert.ok(event.event_errors.length > 0) + assert.ok(results.includes('fast')) +}) + +test('slow event warning fires when event exceeds event_slow_timeout', async () => { + const bus = new EventBus('SlowEventWarnBus', { + event_slow_timeout: 0.01, + event_handler_slow_timeout: null, + }) + const warnings: string[] = [] + const original_warn = console.warn + console.warn = (message?: unknown, ...args: unknown[]) => { + warnings.push(String(message)) + if (args.length > 0) { + warnings.push(args.map(String).join(' ')) + } + } + + try { + bus.on(TimeoutEvent, async () => { + await delay(25) + return 'ok' + }) + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.5 })) + await event.done() + } finally { + console.warn = original_warn + } + + assert.ok( + warnings.some((message) => message.toLowerCase().includes('slow event processing')), + 'Expected slow event warning' + ) +}) + +test('slow handler warning fires when handler runs long', async () => { + const bus = new EventBus('SlowHandlerWarnBus', { + event_handler_slow_timeout: 0.01, + event_slow_timeout: null, + }) + const warnings: 
string[] = [] + const original_warn = console.warn + console.warn = (message?: unknown, ...args: unknown[]) => { + warnings.push(String(message)) + if (args.length > 0) { + warnings.push(args.map(String).join(' ')) + } + } + + try { + bus.on(TimeoutEvent, async () => { + await delay(25) + return 'ok' + }) + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.5 })) + await event.done() + } finally { + console.warn = original_warn + } + + assert.ok( + warnings.some((message) => message.toLowerCase().includes('slow event handler')), + 'Expected slow handler warning' + ) +}) + +test('slow handler and slow event warnings can both fire', async () => { + const bus = new EventBus('SlowComboWarnBus', { + event_handler_slow_timeout: 0.01, + event_slow_timeout: 0.01, + }) + const warnings: string[] = [] + const original_warn = console.warn + console.warn = (message?: unknown, ...args: unknown[]) => { + warnings.push(String(message)) + if (args.length > 0) { + warnings.push(args.map(String).join(' ')) + } + } + + try { + bus.on(TimeoutEvent, async () => { + await delay(25) + return 'ok' + }) + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.5 })) + await event.done() + } finally { + console.warn = original_warn + } + + assert.ok( + warnings.some((message) => message.toLowerCase().includes('slow event handler')), + 'Expected slow handler warning' + ) + assert.ok( + warnings.some((message) => message.toLowerCase().includes('slow event processing')), + 'Expected slow event warning' + ) +}) + +test('event-level concurrency overrides do not bypass timeouts', async () => { + const bus = new EventBus('TimeoutEventOverrideBus', { + event_concurrency: 'global-serial', + event_handler_concurrency: 'global-serial', + }) + + bus.on(TimeoutEvent, async () => { + await delay(50) + return 'slow' + }) + + const event = bus.dispatch( + TimeoutEvent({ + event_timeout: 0.01, + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + ) + await 
event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof EventHandlerTimeoutError) +}) + +test('handler-level concurrency overrides do not bypass timeouts', async () => { + const bus = new EventBus('TimeoutHandlerOverrideBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'global-serial', + }) + + const order: string[] = [] + + bus.on( + TimeoutEvent, + async () => { + order.push('slow_start') + await delay(50) + order.push('slow_end') + return 'slow' + }, + { event_handler_concurrency: 'bus-serial' } + ) + + bus.on( + TimeoutEvent, + async () => { + order.push('fast_start') + await delay(1) + order.push('fast_end') + return 'fast' + }, + { event_handler_concurrency: 'parallel' } + ) + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() + + const statuses = Array.from(event.event_results.values()).map((result) => result.status) + assert.ok(statuses.includes('error')) + assert.ok(statuses.includes('completed')) + assert.ok(order.includes('fast_start')) +}) + +test('forwarded event timeouts apply across buses', async () => { + const bus_a = new EventBus('TimeoutForwardA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('TimeoutForwardB', { event_concurrency: 'bus-serial' }) + + bus_a.on(TimeoutEvent, async (event) => { + bus_b.dispatch(event) + }) + + bus_b.on(TimeoutEvent, async () => { + await delay(50) + return 'slow' + }) + + const event = bus_a.dispatch(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() + + const results = Array.from(event.event_results.values()) + const bus_b_result = results.find((result) => result.eventbus_name === 'TimeoutForwardB') + assert.ok(bus_b_result) + assert.equal(bus_b_result?.status, 'error') + assert.ok(bus_b_result?.error instanceof EventHandlerTimeoutError) +}) + +test('queue-jump awaited child timeouts still fire across buses', async () => { + const 
ParentEvent = BaseEvent.extend('TimeoutParentEvent', {}) + const ChildEvent = BaseEvent.extend('TimeoutChildEvent', {}) + + const bus_a = new EventBus('TimeoutQueueJumpA', { event_concurrency: 'global-serial' }) + const bus_b = new EventBus('TimeoutQueueJumpB', { event_concurrency: 'global-serial' }) + + let child_ref: InstanceType | null = null + + bus_b.on(ChildEvent, async () => { + await delay(50) + return 'slow' + }) + + bus_a.on(ParentEvent, async (event) => { + // Use scoped bus emit to set parent tracking (event_parent_id, event_emitted_by_handler_id), + // then also dispatch on bus_b for cross-bus handler execution. + // Without parent tracking, processEventImmediately can't detect the queue-jump context + // and falls back to waitForCompletion(), which deadlocks with global-serial. + const child = event.bus?.emit(ChildEvent({ event_timeout: 0.01 }))! + bus_b.dispatch(child) + child_ref = child + await child.done() + }) + + const parent = bus_a.dispatch(ParentEvent({ event_timeout: 0.5 })) + await parent.done() + + assert.ok(child_ref) + const child_results = Array.from(child_ref!.event_results.values()) + const timeout_result = child_results.find((result) => result.error instanceof EventHandlerTimeoutError) + assert.ok(timeout_result) +}) + +const STEP1_HANDLER_MODES = ['bus-serial', 'global-serial'] as const +type Step1HandlerMode = (typeof STEP1_HANDLER_MODES)[number] + +const getHandlerSemaphore = (bus: EventBus, mode: Step1HandlerMode) => + mode === 'global-serial' ? 
LockManager.global_handler_semaphore : bus.locks.bus_handler_semaphore + +for (const handler_mode of STEP1_HANDLER_MODES) { + test(`regression: timeout during awaited child.done() does not leak handler semaphore lock [${handler_mode}]`, async () => { + const ParentEvent = BaseEvent.extend(`TimeoutLeakParent-${handler_mode}`, {}) + const ChildEvent = BaseEvent.extend(`TimeoutLeakChild-${handler_mode}`, {}) + + const bus = new EventBus(`TimeoutLeakBus-${handler_mode}`, { + event_concurrency: 'bus-serial', + event_handler_concurrency: handler_mode, + }) + const semaphore = getHandlerSemaphore(bus, handler_mode) + const baseline_in_use = semaphore.in_use + const original_acquire = semaphore.acquire.bind(semaphore) + let acquire_count = 0 + + semaphore.acquire = async () => { + acquire_count += 1 + // Third acquire is the parent reclaim in processEventImmediately finally. + // Delay it so the parent handler timeout can fire in the middle. + if (acquire_count === 3) { + await delay(30) + } + await original_acquire() + } + + try { + bus.on(ChildEvent, async () => { + await delay(1) + return 'child_done' + }) + + bus.on(ParentEvent, async (event) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: 0.2 }))! 
+ await child.done() + return 'parent_done' + }) + + const parent = bus.dispatch(ParentEvent({ event_timeout: 0.01 })) + await parent.done() + await bus.waitUntilIdle() + + const parent_result = Array.from(parent.event_results.values())[0] + assert.equal(parent_result.status, 'error') + assert.ok(parent_result.error instanceof EventHandlerTimeoutError) + assert.equal( + semaphore.in_use, + baseline_in_use, + `handler semaphore leaked lock (mode=${handler_mode}, in_use=${semaphore.in_use}, baseline=${baseline_in_use}, acquires=${acquire_count})` + ) + } finally { + semaphore.acquire = original_acquire + while (semaphore.in_use > baseline_in_use) { + semaphore.release() + } + } + }) +} + +for (const handler_mode of STEP1_HANDLER_MODES) { + test(`regression: parent timeout while reacquire waits behind third serial handler is lock-safe [${handler_mode}]`, async () => { + const ParentEvent = BaseEvent.extend(`TimeoutContentionParent-${handler_mode}`, {}) + const ChildEvent = BaseEvent.extend(`TimeoutContentionChild-${handler_mode}`, {}) + + const bus = new EventBus(`TimeoutContentionBus-${handler_mode}`, { + event_concurrency: 'bus-serial', + event_handler_concurrency: handler_mode, + }) + const semaphore = getHandlerSemaphore(bus, handler_mode) + const baseline_in_use = semaphore.in_use + + bus.on(ChildEvent, async () => { + await delay(2) + return 'child_done' + }) + + bus.on(ParentEvent, async (event) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: 0.2, event_handler_concurrency: 'parallel' }))! + await child.done() + return 'parent_main' + }) + + // This handler queues behind parent_main, then holds the serial semaphore + // while parent_main is trying to reclaim after child.done() completes. 
+ bus.on(ParentEvent, async () => { + await delay(40) + return 'parent_blocker' + }) + + const parent = bus.dispatch(ParentEvent({ event_timeout: 0.01 })) + await parent.done() + await bus.waitUntilIdle() + + const parent_results = Array.from(parent.event_results.values()) + const timeout_results = parent_results.filter((result) => result.error instanceof EventHandlerTimeoutError) + assert.ok(timeout_results.length >= 1, `expected at least one timeout result in ${handler_mode}`) + assert.equal(semaphore.in_use, baseline_in_use) + }) +} + +for (const handler_mode of STEP1_HANDLER_MODES) { + test(`regression: next event still runs on same bus after timeout queue-jump path [${handler_mode}]`, async () => { + const ParentEvent = BaseEvent.extend(`TimeoutFollowupParent-${handler_mode}`, {}) + const ChildEvent = BaseEvent.extend(`TimeoutFollowupChild-${handler_mode}`, {}) + const FollowupEvent = BaseEvent.extend(`TimeoutFollowupTail-${handler_mode}`, {}) + + const bus = new EventBus(`TimeoutFollowupBus-${handler_mode}`, { + event_concurrency: 'bus-serial', + event_handler_concurrency: handler_mode, + }) + const semaphore = getHandlerSemaphore(bus, handler_mode) + const baseline_in_use = semaphore.in_use + const original_acquire = semaphore.acquire.bind(semaphore) + let acquire_count = 0 + semaphore.acquire = async () => { + acquire_count += 1 + if (acquire_count === 3) { + await delay(30) + } + await original_acquire() + } + + let followup_runs = 0 + + try { + bus.on(ChildEvent, async () => { + await delay(1) + }) + + bus.on(ParentEvent, async (event) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: 0.2 }))! 
+ await child.done() + }) + + bus.on(FollowupEvent, async () => { + followup_runs += 1 + return 'followup_done' + }) + + const parent = bus.dispatch(ParentEvent({ event_timeout: 0.01 })) + await parent.done() + await bus.waitUntilIdle() + + const followup = bus.dispatch(FollowupEvent({ event_timeout: 0.05 })) + const followup_completed = await Promise.race([followup.done().then(() => true), delay(100).then(() => false)]) + + assert.equal( + followup_completed, + true, + `follow-up event stalled after timeout queue-jump path (mode=${handler_mode}, in_use=${semaphore.in_use}, acquires=${acquire_count})` + ) + assert.equal(followup_runs, 1) + assert.equal(semaphore.in_use, baseline_in_use) + } finally { + semaphore.acquire = original_acquire + while (semaphore.in_use > baseline_in_use) { + semaphore.release() + } + } + }) +} + +for (const handler_mode of STEP1_HANDLER_MODES) { + test(`regression: nested queue-jump with timeout cancellation remains lock-safe [${handler_mode}]`, async () => { + const ParentEvent = BaseEvent.extend(`NestedPermitParent-${handler_mode}`, {}) + const ChildEvent = BaseEvent.extend(`NestedPermitChild-${handler_mode}`, {}) + const GrandchildEvent = BaseEvent.extend(`NestedPermitGrandchild-${handler_mode}`, {}) + const QueuedSiblingEvent = BaseEvent.extend(`NestedPermitQueuedSibling-${handler_mode}`, {}) + const TailEvent = BaseEvent.extend(`NestedPermitTail-${handler_mode}`, {}) + + const bus = new EventBus(`NestedPermitBus-${handler_mode}`, { + event_concurrency: 'bus-serial', + event_handler_concurrency: handler_mode, + }) + const semaphore = getHandlerSemaphore(bus, handler_mode) + const baseline_in_use = semaphore.in_use + + let queued_sibling_runs = 0 + let tail_runs = 0 + let queued_sibling_ref: InstanceType | null = null + + bus.on(GrandchildEvent, async () => { + await delay(1) + return 'grandchild_done' + }) + + bus.on(ChildEvent, async (event) => { + const grandchild = event.bus?.emit(GrandchildEvent({ event_timeout: 0.2 }))! 
+ await grandchild.done() + await delay(40) + return 'child_done' + }) + + bus.on(QueuedSiblingEvent, async () => { + queued_sibling_runs += 1 + return 'queued_sibling_done' + }) + + bus.on(ParentEvent, async (event) => { + queued_sibling_ref = event.bus?.emit(QueuedSiblingEvent({ event_timeout: 0.2 }))! + const child = event.bus?.emit(ChildEvent({ event_timeout: 0.02 }))! + await child.done() + await delay(40) + }) + + bus.on(TailEvent, async () => { + tail_runs += 1 + return 'tail_done' + }) + + const parent = bus.dispatch(ParentEvent({ event_timeout: 0.03 })) + await parent.done() + await bus.waitUntilIdle() + + const parent_result = Array.from(parent.event_results.values())[0] + assert.equal(parent_result.status, 'error') + assert.ok(parent_result.error instanceof EventHandlerTimeoutError) + + assert.ok(queued_sibling_ref) + assert.equal(queued_sibling_runs, 0) + const queued_sibling_results = Array.from(queued_sibling_ref!.event_results.values()) + assert.ok(queued_sibling_results.some((result) => result.error instanceof EventHandlerCancelledError)) + + assert.equal(semaphore.in_use, baseline_in_use) + + const tail = bus.dispatch(TailEvent({ event_timeout: 0.05 })) + const tail_completed = await Promise.race([tail.done().then(() => true), delay(100).then(() => false)]) + assert.equal(tail_completed, true) + assert.equal(tail_runs, 1) + assert.equal(semaphore.in_use, baseline_in_use) + }) +} + +test('parent timeout cancels pending child handler results under serial handler semaphore', async () => { + const ParentEvent = BaseEvent.extend('TimeoutCancelParentEvent', {}) + const ChildEvent = BaseEvent.extend('TimeoutCancelChildEvent', {}) + + const bus = new EventBus('TimeoutCancelBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', + }) + + let child_runs = 0 + + bus.on(ChildEvent, async () => { + child_runs += 1 + await delay(30) + return 'first' + }) + + bus.on(ChildEvent, async () => { + child_runs += 1 + await delay(10) + 
return 'second' + }) + + bus.on(ParentEvent, async (event) => { + event.bus?.emit(ChildEvent({ event_timeout: 0.2 })) + await delay(50) + }) + + const parent = bus.dispatch(ParentEvent({ event_timeout: 0.01 })) + await parent.done() + await bus.waitUntilIdle() + + const child = parent.event_children[0] + assert.ok(child) + + assert.equal(child_runs, 0) + + const cancelled_results = Array.from(child.event_results.values()).filter((result) => result.error instanceof EventHandlerCancelledError) + assert.ok(cancelled_results.length > 0) +}) + +test('event_timeout null falls back to bus default', async () => { + const bus = new EventBus('TimeoutDefaultBus', { event_timeout: 0.01 }) + + bus.on(TimeoutEvent, async (_event: BaseEvent) => { + await delay(50) + return 'slow' + }) + + const event = bus.dispatch(TimeoutEvent({ event_timeout: null })) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof EventHandlerTimeoutError) +}) + +test('bus default null disables timeouts when event_timeout is null', async () => { + const bus = new EventBus('TimeoutDisabledBus', { event_timeout: null }) + + bus.on(TimeoutEvent, async () => { + await delay(20) + return 'ok' + }) + + const event = bus.dispatch(TimeoutEvent({ event_timeout: null })) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'ok') +}) + +test('multi-level timeout cascade with mixed cancellations', async () => { + const TopEvent = BaseEvent.extend('TimeoutCascadeTop', {}) + const QueuedChildEvent = BaseEvent.extend('TimeoutCascadeQueuedChild', {}) + const AwaitedChildEvent = BaseEvent.extend('TimeoutCascadeAwaitedChild', {}) + const ImmediateGrandchildEvent = BaseEvent.extend('TimeoutCascadeImmediateGrandchild', {}) + const QueuedGrandchildEvent = BaseEvent.extend('TimeoutCascadeQueuedGrandchild', {}) + + const 
bus = new EventBus('TimeoutCascadeBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', + }) + + let queued_child: InstanceType | null = null + let awaited_child: InstanceType | null = null + let immediate_grandchild: InstanceType | null = null + let queued_grandchild: InstanceType | null = null + + let queued_child_runs = 0 + let immediate_grandchild_runs = 0 + let queued_grandchild_runs = 0 + + const queued_child_fast = async () => { + queued_child_runs += 1 + await delay(5) + return 'queued_fast' + } + + const queued_child_slow = async () => { + queued_child_runs += 1 + await delay(50) + return 'queued_slow' + } + + const awaited_child_fast = async () => { + await delay(5) + return 'awaited_fast' + } + + const awaited_child_slow = async (event: BaseEvent) => { + queued_grandchild = event.bus?.emit(QueuedGrandchildEvent({ event_timeout: 0.2 }))! + immediate_grandchild = event.bus?.emit(ImmediateGrandchildEvent({ event_timeout: 0.2 }))! + await immediate_grandchild.done() + await delay(100) + return 'awaited_slow' + } + + const immediate_grandchild_slow = async () => { + immediate_grandchild_runs += 1 + await delay(50) + return 'immediate_grandchild_slow' + } + + const immediate_grandchild_fast = async () => { + immediate_grandchild_runs += 1 + await delay(10) + return 'immediate_grandchild_fast' + } + + const queued_grandchild_slow = async () => { + queued_grandchild_runs += 1 + await delay(50) + return 'queued_grandchild_slow' + } + + const queued_grandchild_fast = async () => { + queued_grandchild_runs += 1 + await delay(10) + return 'queued_grandchild_fast' + } + + bus.on(QueuedChildEvent, queued_child_fast) + bus.on(QueuedChildEvent, queued_child_slow) + bus.on(AwaitedChildEvent, awaited_child_fast) + bus.on(AwaitedChildEvent, awaited_child_slow) + bus.on(ImmediateGrandchildEvent, immediate_grandchild_slow) + bus.on(ImmediateGrandchildEvent, immediate_grandchild_fast) + bus.on(QueuedGrandchildEvent, queued_grandchild_slow) + 
bus.on(QueuedGrandchildEvent, queued_grandchild_fast) + + bus.on(TopEvent, async (event) => { + queued_child = event.bus?.emit(QueuedChildEvent({ event_timeout: 0.2 }))! + awaited_child = event.bus?.emit(AwaitedChildEvent({ event_timeout: 0.03 }))! + await awaited_child.done() + await delay(80) + }) + + const top = bus.dispatch(TopEvent({ event_timeout: 0.04 })) + await top.done() + await bus.waitUntilIdle() + + const top_result = Array.from(top.event_results.values())[0] + assert.equal(top_result.status, 'error') + assert.ok(top_result.error instanceof EventHandlerTimeoutError) + + assert.ok(queued_child) + const queued_results = Array.from(queued_child!.event_results.values()) + assert.equal(queued_child_runs, 0) + assert.ok(queued_results.length >= 2) + for (const result of queued_results) { + assert.equal(result.status, 'error') + assert.ok(result.error instanceof EventHandlerCancelledError) + assert.ok((result.error as EventHandlerCancelledError).cause instanceof EventHandlerTimeoutError) + } + + assert.ok(awaited_child) + const awaited_results = Array.from(awaited_child!.event_results.values()) + const awaited_completed = awaited_results.filter((result) => result.status === 'completed') + const awaited_timeouts = awaited_results.filter((result) => result.error instanceof EventHandlerTimeoutError) + assert.equal(awaited_completed.length, 1) + assert.equal(awaited_timeouts.length, 1) + + assert.ok(immediate_grandchild) + const immediate_results = Array.from(immediate_grandchild!.event_results.values()) + // With bus-serial handler concurrency (no longer bypassed during queue-jump), + // only the first grandchild handler starts before the awaited child's 30ms timeout fires. + // The second handler is still pending (waiting for semaphore) β†’ cancelled. + // The first handler was already started β†’ aborted (EventHandlerAbortedError). 
+ assert.equal(immediate_grandchild_runs, 1) + const immediate_aborted = immediate_results.filter((result) => result.error instanceof EventHandlerAbortedError) + assert.equal(immediate_aborted.length, 1) + const immediate_cancelled = immediate_results.filter((result) => result.error instanceof EventHandlerCancelledError) + assert.equal(immediate_cancelled.length, 1) + + assert.ok(queued_grandchild) + const queued_grandchild_results = Array.from(queued_grandchild!.event_results.values()) + assert.equal(queued_grandchild_runs, 0) + const queued_cancelled = queued_grandchild_results.filter((result) => result.error instanceof EventHandlerCancelledError) + assert.ok(queued_cancelled.length >= 2) +}) + +// ============================================================================= +// Three-level timeout cascade (mirrors Python test_handler_timeout.py) +// +// This test creates a deep event hierarchy: +// TopEvent (250ms timeout) +// β”œβ”€β”€ ChildEvent (80ms timeout) β€” awaited by top_handler_main +// β”‚ β”œβ”€β”€ GrandchildEvent (35ms timeout) β€” awaited by child_handler +// β”‚ β”‚ └── 5 handlers (parallel): 3 slow (timeout), 2 fast (complete) +// β”‚ └── QueuedGrandchildEvent β€” emitted but NOT awaited, stays in queue +// β”‚ └── 1 handler: never runs, CANCELLED when child_handler times out +// └── SiblingEvent β€” emitted but NOT awaited, stays in queue +// └── 1 handler: never runs, CANCELLED when top_handler_main times out +// +// KEY MECHANIC: When a child event is awaited via event.done() inside a handler, +// it triggers "queue-jumping" via processEventImmediately β†’ runImmediatelyAcrossBuses. +// Queue-jumped events use yield-and-reacquire: the parent handler's semaphore is +// temporarily released so child handlers can acquire it normally. This means +// child handlers run SERIALLY on a bus-serial bus (respecting concurrency limits). 
+// Non-awaited child events stay in the pending_event_queue and are blocked by +// immediate_processing_stack_depth > 0 (runloop is paused during queue-jump). +// +// TIMEOUT BEHAVIOR: Each handler gets its OWN timeout window starting from when +// that handler begins execution β€” NOT from when the event was dispatched. +// With serial handlers, each timeout starts when the handler acquires the semaphore. +// +// CANCELLATION CASCADE: When a handler times out, bus.cancelPendingDescendants() +// walks the event's children tree and marks any "pending" handler results as +// EventHandlerCancelledError. Only "pending" results are cancelled β€” handlers +// that already started ("started" status) continue running in the background. +// ============================================================================= + +test('three-level timeout cascade with per-level timeouts and cascading cancellation', async () => { + const TopEvent = BaseEvent.extend('Cascade3LTop', {}) + const ChildEvent = BaseEvent.extend('Cascade3LChild', {}) + const GrandchildEvent = BaseEvent.extend('Cascade3LGrandchild', {}) + const QueuedGrandchildEvent = BaseEvent.extend('Cascade3LQueuedGC', {}) + const SiblingEvent = BaseEvent.extend('Cascade3LSibling', {}) + + const bus = new EventBus('Cascade3LevelBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', + }) + + const execution_log: string[] = [] + let child_ref: InstanceType | null = null + let grandchild_ref: InstanceType | null = null + let queued_grandchild_ref: InstanceType | null = null + let sibling_ref: InstanceType | null = null + + // ── GrandchildEvent handlers ────────────────────────────────────────── + // These run SERIALLY because queue-jumped events respect the bus-serial + // handler semaphore (yield-and-reacquire). Each handler gets its own 35ms + // timeout window starting from when that handler acquires the semaphore. 
+ // + // Serial order: a(35ms timeout) β†’ b(sync) β†’ c(35ms timeout) β†’ d(10ms) β†’ e(35ms timeout) + // Total time for all 5: ~35+0+35+10+35 = ~115ms (within child's 150ms timeout) + + const gc_handler_a = async () => { + execution_log.push('gc_a_start') + await delay(500) // will be interrupted by 35ms timeout (500ms > total test time) + execution_log.push('gc_a_end') // should never reach here before assertions + return 'gc_a_done' + } + + const gc_handler_b = () => { + execution_log.push('gc_b_complete') + return 'gc_b_done' + } + + const gc_handler_c = async () => { + execution_log.push('gc_c_start') + await delay(500) // will be interrupted by 35ms timeout (500ms > total test time) + execution_log.push('gc_c_end') // should never reach here before assertions + return 'gc_c_done' + } + + const gc_handler_d = async () => { + execution_log.push('gc_d_start') + await delay(10) // fast enough to complete within 35ms + execution_log.push('gc_d_complete') + return 'gc_d_done' + } + + const gc_handler_e = async () => { + execution_log.push('gc_e_start') + await delay(500) // will be interrupted by 35ms timeout (500ms > total test time) + execution_log.push('gc_e_end') // should never reach here before assertions + return 'gc_e_done' + } + + // ── QueuedGrandchildEvent handler ───────────────────────────────────── + // This event is emitted by child_handler but NOT awaited, so it sits in + // pending_event_queue. When child_handler times out at 80ms, + // bus.cancelPendingDescendants walks ChildEvent.event_children and finds + // this event still pending β†’ its handler results are marked as cancelled. 
+ const queued_gc_handler = () => { + execution_log.push('queued_gc_start') // should never reach here + return 'queued_gc_done' + } + + // ── ChildEvent handler ──────────────────────────────────────────────── + // Emits GrandchildEvent (awaited β†’ queue-jump, ~35ms to complete) + // Emits QueuedGrandchildEvent (NOT awaited β†’ stays in queue) + // After grandchild completes, sleeps 300ms β†’ times out at 80ms total + const child_handler = async (event: InstanceType) => { + execution_log.push('child_start') + grandchild_ref = event.bus?.emit(GrandchildEvent({ event_timeout: 0.035 }))! + queued_grandchild_ref = event.bus?.emit(QueuedGrandchildEvent({ event_timeout: 0.5 }))! + // Queue-jump: processes GrandchildEvent immediately via yield-and-reacquire. + // All 5 GC handlers run serially. Completes in ~115ms (within 150ms child timeout). + await grandchild_ref.done() + execution_log.push('child_after_grandchild') + await delay(300) // will be interrupted: child started at ~t=0, timeout at 150ms + execution_log.push('child_end') // should never reach here + return 'child_done' + } + + // ── SiblingEvent handler ────────────────────────────────────────────── + // This event is emitted by top_handler_main but NOT awaited. Stays in + // pending_event_queue until top_handler_main times out at 250ms β†’ + // cancelled by bus.cancelPendingDescendants. + const sibling_handler = () => { + execution_log.push('sibling_start') // should never reach here + return 'sibling_done' + } + + // ── TopEvent handlers ───────────────────────────────────────────────── + // These run SERIALLY (via bus.locks.bus_handler_semaphore) because TopEvent is + // processed by the normal runloop (not queue-jumped). top_handler_fast + // goes first, completes quickly, then top_handler_main starts. 
+ + const top_handler_fast = async () => { + execution_log.push('top_fast_start') + await delay(2) + execution_log.push('top_fast_complete') + return 'top_fast_done' + } + + const top_handler_main = async (event: InstanceType) => { + execution_log.push('top_main_start') + child_ref = event.bus?.emit(ChildEvent({ event_timeout: 0.15 }))! + sibling_ref = event.bus?.emit(SiblingEvent({ event_timeout: 0.5 }))! + // Queue-jump: processes ChildEvent immediately (which in turn queue-jumps + // GrandchildEvent). This entire subtree resolves in ~80ms (child timeout). + await child_ref.done() + execution_log.push('top_main_after_child') + await delay(300) // will be interrupted: top_handler_main started at ~t=2, timeout at 250ms + execution_log.push('top_main_end') // should never reach here + return 'top_main_done' + } + + // Register handlers (registration order = execution order for serial) + bus.on(TopEvent, top_handler_fast) + bus.on(TopEvent, top_handler_main) + bus.on(ChildEvent, child_handler) + bus.on(GrandchildEvent, gc_handler_a) + bus.on(GrandchildEvent, gc_handler_b) + bus.on(GrandchildEvent, gc_handler_c) + bus.on(GrandchildEvent, gc_handler_d) + bus.on(GrandchildEvent, gc_handler_e) + bus.on(QueuedGrandchildEvent, queued_gc_handler) + bus.on(SiblingEvent, sibling_handler) + + // ── Dispatch and wait ───────────────────────────────────────────────── + const top = bus.dispatch(TopEvent({ event_timeout: 0.25 })) + await top.done() + await bus.waitUntilIdle() + + // ═══════════════════════════════════════════════════════════════════════ + // ASSERTIONS + // ═══════════════════════════════════════════════════════════════════════ + + // ── TopEvent: 2 handler results (1 completed, 1 timed out) ────────── + assert.equal(top.event_status, 'completed') + assert.ok(top.event_errors.length >= 1, 'TopEvent should have at least 1 error') + + const top_results = Array.from(top.event_results.values()) + assert.equal(top_results.length, 2, 'TopEvent should have 2 handler 
results') + + const top_fast_result = top_results.find((r) => r.handler_name === 'top_handler_fast') + assert.ok(top_fast_result, 'top_handler_fast result should exist') + assert.equal(top_fast_result!.status, 'completed') + assert.equal(top_fast_result!.result, 'top_fast_done') + + const top_main_result = top_results.find((r) => r.handler_name === 'top_handler_main') + assert.ok(top_main_result, 'top_handler_main result should exist') + assert.equal(top_main_result!.status, 'error') + assert.ok(top_main_result!.error instanceof EventHandlerTimeoutError, 'top_handler_main should have timed out') + + // ── ChildEvent: 1 handler result (timed out at 150ms) ──────────────── + assert.ok(child_ref, 'ChildEvent should have been emitted') + assert.equal(child_ref!.event_status, 'completed') + + const child_results = Array.from(child_ref!.event_results.values()) + assert.equal(child_results.length, 1, 'ChildEvent should have 1 handler result') + assert.equal(child_results[0].handler_name, 'child_handler') + assert.equal(child_results[0].status, 'error') + assert.ok(child_results[0].error instanceof EventHandlerTimeoutError, 'child_handler should have timed out') + + // ── GrandchildEvent: 5 handler results (2 completed, 3 timed out) ── + assert.ok(grandchild_ref, 'GrandchildEvent should have been emitted') + assert.equal(grandchild_ref!.event_status, 'completed') + + const gc_results = Array.from(grandchild_ref!.event_results.values()) + assert.equal(gc_results.length, 5, 'GrandchildEvent should have 5 handler results') + + // Handlers a, c, e: slow β†’ individually timed out + for (const name of ['gc_handler_a', 'gc_handler_c', 'gc_handler_e']) { + const result = gc_results.find((r) => r.handler_name === name) + assert.ok(result, `${name} result should exist`) + assert.equal(result!.status, 'error', `${name} should have status error`) + assert.ok(result!.error instanceof EventHandlerTimeoutError, `${name} should be EventHandlerTimeoutError`) + } + + // Handlers b, d: fast 
β†’ completed successfully + const gc_b_result = gc_results.find((r) => r.handler_name === 'gc_handler_b') + assert.ok(gc_b_result, 'gc_handler_b result should exist') + assert.equal(gc_b_result!.status, 'completed') + assert.equal(gc_b_result!.result, 'gc_b_done') + + const gc_d_result = gc_results.find((r) => r.handler_name === 'gc_handler_d') + assert.ok(gc_d_result, 'gc_handler_d result should exist') + assert.equal(gc_d_result!.status, 'completed') + assert.equal(gc_d_result!.result, 'gc_d_done') + + // ── QueuedGrandchildEvent: CANCELLED by child_handler timeout ─────── + // This event was emitted but never awaited. It sat in pending_event_queue + // until child_handler timed out, which triggered bus.cancelPendingDescendants + // to walk ChildEvent.event_children and cancel all pending handlers. + assert.ok(queued_grandchild_ref, 'QueuedGrandchildEvent should have been emitted') + assert.equal(queued_grandchild_ref!.event_status, 'completed') + + const queued_gc_results = Array.from(queued_grandchild_ref!.event_results.values()) + assert.equal(queued_gc_results.length, 1, 'QueuedGC should have 1 handler result') + assert.equal(queued_gc_results[0].status, 'error') + assert.ok( + queued_gc_results[0].error instanceof EventHandlerCancelledError, + 'QueuedGC handler should be EventHandlerCancelledError (not timeout β€” it never ran)' + ) + // Verify the cancellation error chain: CancelledError.cause β†’ TimeoutError + assert.ok( + (queued_gc_results[0].error as EventHandlerCancelledError).cause instanceof EventHandlerTimeoutError, + "QueuedGC cancellation should reference the child_handler's timeout as cause" + ) + + // ── SiblingEvent: CANCELLED by top_handler_main timeout ───────────── + // Same pattern: emitted but never awaited, stays in queue, cancelled when + // top_handler_main times out and bus.cancelPendingDescendants runs. 
+ assert.ok(sibling_ref, 'SiblingEvent should have been emitted') + assert.equal(sibling_ref!.event_status, 'completed') + + const sibling_results = Array.from(sibling_ref!.event_results.values()) + assert.equal(sibling_results.length, 1, 'SiblingEvent should have 1 handler result') + assert.equal(sibling_results[0].status, 'error') + assert.ok(sibling_results[0].error instanceof EventHandlerCancelledError, 'SiblingEvent handler should be EventHandlerCancelledError') + assert.ok( + (sibling_results[0].error as EventHandlerCancelledError).cause instanceof EventHandlerTimeoutError, + "SiblingEvent cancellation should reference top_handler_main's timeout as cause" + ) + + // ── Execution log: verify what ran and what didn't ────────────────── + // These handlers started AND completed: + assert.ok(execution_log.includes('top_fast_start'), 'top_fast should have started') + assert.ok(execution_log.includes('top_fast_complete'), 'top_fast should have completed') + assert.ok(execution_log.includes('gc_b_complete'), 'gc_b (sync) should have completed') + assert.ok(execution_log.includes('gc_d_start'), 'gc_d should have started') + assert.ok(execution_log.includes('gc_d_complete'), 'gc_d should have completed') + + // These handlers started but were interrupted by their own timeout: + assert.ok(execution_log.includes('gc_a_start'), 'gc_a should have started') + assert.ok(!execution_log.includes('gc_a_end'), 'gc_a should NOT have finished (timed out)') + assert.ok(execution_log.includes('gc_c_start'), 'gc_c should have started') + assert.ok(!execution_log.includes('gc_c_end'), 'gc_c should NOT have finished (timed out)') + assert.ok(execution_log.includes('gc_e_start'), 'gc_e should have started') + assert.ok(!execution_log.includes('gc_e_end'), 'gc_e should NOT have finished (timed out)') + + // These handlers started and progressed, then parent timeout interrupted: + assert.ok(execution_log.includes('top_main_start'), 'top_main should have started') + 
assert.ok(execution_log.includes('child_start'), 'child should have started') + assert.ok(execution_log.includes('child_after_grandchild'), 'child should have continued after grandchild completed') + assert.ok(execution_log.includes('top_main_after_child'), 'top_main should have continued after child completed') + assert.ok(!execution_log.includes('child_end'), 'child should NOT have finished (timed out)') + assert.ok(!execution_log.includes('top_main_end'), 'top_main should NOT have finished (timed out)') + + // These handlers never ran at all (cancelled before starting): + assert.ok(!execution_log.includes('queued_gc_start'), 'queued_gc should never have started') + assert.ok(!execution_log.includes('sibling_start'), 'sibling should never have started') + + // ── Parent-child tree structure ───────────────────────────────────── + assert.ok( + top.event_children.some((c) => c.event_id === child_ref!.event_id), + 'ChildEvent should be in TopEvent.event_children' + ) + assert.ok( + top.event_children.some((c) => c.event_id === sibling_ref!.event_id), + 'SiblingEvent should be in TopEvent.event_children' + ) + assert.ok( + child_ref!.event_children.some((c) => c.event_id === grandchild_ref!.event_id), + 'GrandchildEvent should be in ChildEvent.event_children' + ) + assert.ok( + child_ref!.event_children.some((c) => c.event_id === queued_grandchild_ref!.event_id), + 'QueuedGrandchildEvent should be in ChildEvent.event_children' + ) + + // ── Timing invariants ────────────────────────────────────────────── + // All events should have completion timestamps + for (const evt of [top, child_ref!, grandchild_ref!, queued_grandchild_ref!, sibling_ref!]) { + assert.ok(evt.event_completed_at, `${evt.event_type} should have event_completed_at`) + } + // All handler results should have started_at and completed_at + for (const result of top_results) { + assert.ok(result.started_at, `${result.handler_name} should have started_at`) + assert.ok(result.completed_at, 
`${result.handler_name} should have completed_at`) + } + for (const result of gc_results) { + assert.ok(result.started_at, `${result.handler_name} should have started_at`) + assert.ok(result.completed_at, `${result.handler_name} should have completed_at`) + } +}) + +// ============================================================================= +// Verify the timeoutβ†’cancellation error chain is intact at every level. +// When a parent handler times out and cancels a child's pending handlers, +// the EventHandlerCancelledError.cause must reference the specific +// EventHandlerTimeoutError that caused the cascade. This test creates a +// 2-level chain where each level's cancellation error can be inspected. +// ============================================================================= + +test('cancellation error chain preserves cause references through hierarchy', async () => { + const OuterEvent = BaseEvent.extend('ErrorChainOuter', {}) + const InnerEvent = BaseEvent.extend('ErrorChainInner', {}) + const DeepEvent = BaseEvent.extend('ErrorChainDeep', {}) + + const bus = new EventBus('ErrorChainBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', + }) + + let inner_ref: InstanceType | null = null + let deep_ref: InstanceType | null = null + + // DeepEvent handler: sleeps long, will be still pending when inner times out + // Because DeepEvent is emitted but NOT awaited, it stays in the queue. + const deep_handler = async () => { + await delay(200) + return 'deep_done' + } + + // InnerEvent handler: emits DeepEvent (not awaited), then sleeps long β†’ times out + const inner_handler = async (event: InstanceType) => { + deep_ref = event.bus?.emit(DeepEvent({ event_timeout: 0.5 }))! 
+ await delay(200) // interrupted by inner timeout + return 'inner_done' + } + + // OuterEvent handler: emits InnerEvent (awaited), then sleeps long β†’ times out + const outer_handler = async (event: InstanceType) => { + inner_ref = event.bus?.emit(InnerEvent({ event_timeout: 0.04 }))! + await inner_ref.done() + await delay(200) // interrupted by outer timeout + return 'outer_done' + } + + bus.on(OuterEvent, outer_handler) + bus.on(InnerEvent, inner_handler) + bus.on(DeepEvent, deep_handler) + + const outer = bus.dispatch(OuterEvent({ event_timeout: 0.15 })) + await outer.done() + await bus.waitUntilIdle() + + // Outer handler timed out + const outer_result = Array.from(outer.event_results.values())[0] + assert.equal(outer_result.status, 'error') + assert.ok(outer_result.error instanceof EventHandlerTimeoutError) + // Inner handler timed out (its own 40ms timeout, not outer's) + assert.ok(inner_ref) + const inner_result = Array.from(inner_ref!.event_results.values())[0] + assert.equal(inner_result.status, 'error') + assert.ok(inner_result.error instanceof EventHandlerTimeoutError) + const inner_timeout = inner_result.error as EventHandlerTimeoutError + + // Inner's timeout is from InnerEvent's own event_timeout (40ms), + // not inherited from outer + assert.ok(inner_timeout.message.includes('inner_handler'), 'Inner timeout should name inner_handler') + + // DeepEvent was cancelled when inner_handler timed out. + // The cancellation error should reference inner_handler's timeout (not outer's). 
+ assert.ok(deep_ref) + const deep_result = Array.from(deep_ref!.event_results.values())[0] + assert.equal(deep_result.status, 'error') + assert.ok( + deep_result.error instanceof EventHandlerCancelledError, + 'DeepEvent handler should be cancelled, not timed out (it never started)' + ) + const deep_cancel = deep_result.error as EventHandlerCancelledError + assert.ok(deep_cancel.cause instanceof EventHandlerTimeoutError, 'Cancellation should reference parent timeout') + // The cause should be the INNER handler's timeout, because that's + // the handler whose bus.cancelPendingDescendants actually cancelled DeepEvent. + assert.ok( + deep_cancel.cause.message.includes('inner_handler') || deep_cancel.cause.message.includes('child_handler'), + 'cause should reference the handler that directly caused cancellation' + ) +}) + +// ============================================================================= +// When a parent has a timeout but a child has event_timeout: null (no timeout), +// the child's handlers run indefinitely on their own β€” but if the PARENT times +// out, bus.cancelPendingDescendants still cancels any pending child handlers. +// This tests that cancellation works across timeout/no-timeout boundaries. 
+// ============================================================================= + +test('parent timeout cancels children that have no timeout of their own', async () => { + const ParentEvent = BaseEvent.extend('TimeoutBoundaryParent', {}) + const NoTimeoutChild = BaseEvent.extend('TimeoutBoundaryChild', {}) + + const bus = new EventBus('TimeoutBoundaryBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', + event_timeout: null, // no bus-level default + }) + + let child_ref: InstanceType | null = null + let child_handler_ran = false + + // Child handler: would run forever but should be cancelled + const child_slow_handler = async () => { + child_handler_ran = true + await delay(500) + return 'child_done' + } + + // Parent handler: emits child (not awaited), then sleeps β†’ parent times out + const parent_handler = async (event: InstanceType) => { + // event_timeout: null means the child has no timeout of its own. + // It would run forever if the parent didn't cancel it. + child_ref = event.bus?.emit(NoTimeoutChild({ event_timeout: null }))! 
+ await delay(200) + return 'parent_done' + } + + bus.on(ParentEvent, parent_handler) + bus.on(NoTimeoutChild, child_slow_handler) + + const parent = bus.dispatch(ParentEvent({ event_timeout: 0.03 })) + await parent.done() + await bus.waitUntilIdle() + + // Parent timed out + const parent_result = Array.from(parent.event_results.values())[0] + assert.equal(parent_result.status, 'error') + assert.ok(parent_result.error instanceof EventHandlerTimeoutError) + + // Child should exist and be cancelled (it was in the queue, never started) + assert.ok(child_ref, 'Child event should have been emitted') + assert.equal(child_ref!.event_status, 'completed') + assert.equal(child_handler_ran, false, 'Child handler should never have started') + + const child_results = Array.from(child_ref!.event_results.values()) + assert.equal(child_results.length, 1) + assert.ok( + child_results[0].error instanceof EventHandlerCancelledError, + 'Child handler should be cancelled by parent timeout, even though it has no timeout' + ) +}) diff --git a/bubus-ts/tests/typed_results.test.ts b/bubus-ts/tests/typed_results.test.ts new file mode 100644 index 0000000..acec6fa --- /dev/null +++ b/bubus-ts/tests/typed_results.test.ts @@ -0,0 +1,243 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const typed_result_schema = z.object({ + value: z.string(), + count: z.number(), +}) + +const TypedResultEvent = BaseEvent.extend('TypedResultEvent', { + event_result_schema: typed_result_schema, + event_result_type: 'TypedResult', +}) + +const StringResultEvent = BaseEvent.extend('StringResultEvent', { + event_result_schema: z.string(), + event_result_type: 'string', +}) + +const NumberResultEvent = BaseEvent.extend('NumberResultEvent', { + event_result_schema: z.number(), + event_result_type: 'number', +}) + +const ComplexResultEvent = BaseEvent.extend('ComplexResultEvent', { + 
event_result_schema: z.object({ + items: z.array(z.string()), + metadata: z.record(z.string(), z.number()), + }), +}) + +const NoSchemaEvent = BaseEvent.extend('NoSchemaEvent', {}) + +const AutoObjectResultEvent = BaseEvent.extend('AutoObjectResultEvent', { + event_result_schema: z.object({ ok: z.boolean() }), +}) + +const AutoRecordResultEvent = BaseEvent.extend('AutoRecordResultEvent', { + event_result_schema: z.record(z.string(), z.number()), +}) + +const AutoMapResultEvent = BaseEvent.extend('AutoMapResultEvent', { + event_result_schema: z.map(z.string(), z.number()), +}) + +const AutoStringResultEvent = BaseEvent.extend('AutoStringResultEvent', { + event_result_schema: z.string(), +}) + +const AutoNumberResultEvent = BaseEvent.extend('AutoNumberResultEvent', { + event_result_schema: z.number(), +}) + +const AutoBooleanResultEvent = BaseEvent.extend('AutoBooleanResultEvent', { + event_result_schema: z.boolean(), +}) + +const ExplicitTypeWinsEvent = BaseEvent.extend('ExplicitTypeWinsEvent', { + event_result_schema: z.string(), + event_result_type: 'CustomResultType', +}) + +test('typed result schema validates and parses handler result', async () => { + const bus = new EventBus('TypedResultBus') + + bus.on(TypedResultEvent, () => ({ value: 'hello', count: 42 })) + + const event = bus.dispatch(TypedResultEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { value: 'hello', count: 42 }) + assert.equal(event.event_result_type, 'TypedResult') +}) + +test('built-in result schemas validate handler results', async () => { + const bus = new EventBus('BuiltinResultBus') + + bus.on(StringResultEvent, () => '42') + bus.on(NumberResultEvent, () => 123) + + const string_event = bus.dispatch(StringResultEvent({})) + const number_event = bus.dispatch(NumberResultEvent({})) + await string_event.done() + await number_event.done() + + const string_result = 
Array.from(string_event.event_results.values())[0] + const number_result = Array.from(number_event.event_results.values())[0] + + assert.equal(string_result.status, 'completed') + assert.equal(string_result.result, '42') + assert.equal(number_result.status, 'completed') + assert.equal(number_result.result, 123) +}) + +test('invalid handler result marks error when schema is defined', async () => { + const bus = new EventBus('ResultValidationErrorBus') + + bus.on(NumberResultEvent, () => 'not_a_number') + + const event = bus.dispatch(NumberResultEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof Error) + assert.ok(event.event_errors.length > 0) +}) + +test('no schema leaves raw handler result untouched', async () => { + const bus = new EventBus('NoSchemaResultBus') + + bus.on(NoSchemaEvent, () => ({ raw: true })) + + const event = bus.dispatch(NoSchemaEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { raw: true }) +}) + +test('complex result schema validates nested data', async () => { + const bus = new EventBus('ComplexResultBus') + + bus.on(ComplexResultEvent, () => ({ + items: ['a', 'b'], + metadata: { a: 1, b: 2 }, + })) + + const event = bus.dispatch(ComplexResultEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { items: ['a', 'b'], metadata: { a: 1, b: 2 } }) +}) + +test('event_result_type auto-infers from common event_result_schema types', () => { + assert.equal(AutoObjectResultEvent.event_result_type, 'object') + assert.equal(AutoRecordResultEvent.event_result_type, 'object') + assert.equal(AutoMapResultEvent.event_result_type, 'object') + assert.equal(AutoStringResultEvent.event_result_type, 'string') 
+ assert.equal(AutoNumberResultEvent.event_result_type, 'number') + assert.equal(AutoBooleanResultEvent.event_result_type, 'boolean') + + assert.equal(AutoObjectResultEvent({}).event_result_type, 'object') + assert.equal(AutoRecordResultEvent({}).event_result_type, 'object') + assert.equal(AutoMapResultEvent({}).event_result_type, 'object') + assert.equal(AutoStringResultEvent({}).event_result_type, 'string') + assert.equal(AutoNumberResultEvent({}).event_result_type, 'number') + assert.equal(AutoBooleanResultEvent({}).event_result_type, 'boolean') +}) + +test('explicit event_result_type is not overridden by inference', () => { + assert.equal(ExplicitTypeWinsEvent.event_result_type, 'CustomResultType') + assert.equal(ExplicitTypeWinsEvent({}).event_result_type, 'CustomResultType') +}) + +test('fromJSON converts event_result_schema into zod schema', async () => { + const bus = new EventBus('FromJsonResultBus') + + const original = TypedResultEvent({ + event_result_schema: typed_result_schema, + event_result_type: 'TypedResult', + }) + const json = original.toJSON() + + const restored = TypedResultEvent.fromJSON?.(json) ?? 
TypedResultEvent(json as never) + + assert.ok(restored.event_result_schema) + assert.equal(typeof (restored.event_result_schema as { safeParse?: unknown }).safeParse, 'function') + + bus.on(TypedResultEvent, () => ({ value: 'from-json', count: 7 })) + + const dispatched = bus.dispatch(restored) + await dispatched.done() + + const result = Array.from(dispatched.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { value: 'from-json', count: 7 }) +}) + +test('roundtrip preserves complex result schema types', async () => { + const bus = new EventBus('RoundtripSchemaBus') + + const complex_schema = z.object({ + title: z.string(), + count: z.number(), + flags: z.array(z.boolean()), + active: z.boolean(), + meta: z.object({ + tags: z.array(z.string()), + rating: z.number(), + }), + }) + + const ComplexRoundtripEvent = BaseEvent.extend('ComplexRoundtripEvent', { + event_result_schema: complex_schema, + event_result_type: 'ComplexRoundtrip', + }) + + const original = ComplexRoundtripEvent({ + event_result_schema: complex_schema, + event_result_type: 'ComplexRoundtrip', + }) + + const roundtripped = ComplexRoundtripEvent.fromJSON?.(original.toJSON()) ?? 
ComplexRoundtripEvent(original.toJSON() as never) + + const zod_any = z as unknown as { + toJSONSchema?: (schema: unknown) => unknown + } + if (typeof zod_any.toJSONSchema === 'function') { + const original_schema_json = zod_any.toJSONSchema(complex_schema) + const roundtrip_schema_json = zod_any.toJSONSchema(roundtripped.event_result_schema) + assert.deepEqual(roundtrip_schema_json, original_schema_json) + } + + bus.on(ComplexRoundtripEvent, () => ({ + title: 'ok', + count: 3, + flags: [true, false, true], + active: false, + meta: { tags: ['a', 'b'], rating: 4 }, + })) + + const dispatched = bus.dispatch(roundtripped) + await dispatched.done() + + const result = Array.from(dispatched.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { + title: 'ok', + count: 3, + flags: [true, false, true], + active: false, + meta: { tags: ['a', 'b'], rating: 4 }, + }) +}) diff --git a/bubus-ts/tsconfig.json b/bubus-ts/tsconfig.json new file mode 100644 index 0000000..f653c22 --- /dev/null +++ b/bubus-ts/tsconfig.json @@ -0,0 +1,18 @@ +{ + "compilerOptions": { + "target": "ES2022", + "lib": ["ES2024", "DOM"], + "module": "ESNext", + "moduleResolution": "Bundler", + "strict": true, + "skipLibCheck": true, + "noEmitOnError": true, + "declaration": true, + "emitDeclarationOnly": false, + "outDir": "dist/types", + "rootDir": "src", + "forceConsistentCasingInFileNames": true, + "useDefineForClassFields": true + }, + "include": ["src"] +} diff --git a/bubus/__init__.py b/bubus/__init__.py index df6e6e2..be3d8a3 100644 --- a/bubus/__init__.py +++ b/bubus/__init__.py @@ -1,11 +1,25 @@ """Event bus for the browser-use agent.""" -from bubus.models import BaseEvent, EventHandler, EventResult, PythonIdentifierStr, PythonIdStr, UUIDStr -from bubus.service import EventBus +from .event_history import EventHistory, InMemoryEventHistory +from .middlewares import ( + EventBusMiddleware, + LoggerEventBusMiddleware, + 
SQLiteHistoryMirrorMiddleware, + WALEventBusMiddleware, +) +from .models import BaseEvent, EventHandler, EventResult, EventStatus, PythonIdentifierStr, PythonIdStr, UUIDStr +from .service import EventBus __all__ = [ 'EventBus', + 'EventBusMiddleware', + 'LoggerEventBusMiddleware', + 'SQLiteHistoryMirrorMiddleware', + 'WALEventBusMiddleware', + 'EventHistory', + 'InMemoryEventHistory', 'BaseEvent', + 'EventStatus', 'EventResult', 'EventHandler', 'UUIDStr', diff --git a/bubus/event_history.py b/bubus/event_history.py new file mode 100644 index 0000000..6494bc8 --- /dev/null +++ b/bubus/event_history.py @@ -0,0 +1,17 @@ +from __future__ import annotations + +from typing import Any, Generic, TypeVar + +from .models import BaseEvent, UUIDStr + +BaseEventT = TypeVar('BaseEventT', bound=BaseEvent[Any]) + + +class EventHistory(dict[UUIDStr, BaseEventT], Generic[BaseEventT]): + """Backward-compatible in-memory history with plain dict behaviour.""" + + __slots__ = () + + +# Backwards compatible alias – before refactor this was the default backend. 
+InMemoryEventHistory = EventHistory diff --git a/bubus/logging.py b/bubus/logging.py index b1b3814..7311db0 100644 --- a/bubus/logging.py +++ b/bubus/logging.py @@ -37,7 +37,7 @@ def log_event_tree( event: 'BaseEvent[Any]', indent: str = '', is_last: bool = True, - child_events_by_parent: dict[str | None, list['BaseEvent[Any]']] | None = None, + event_children_by_parent: dict[str | None, list['BaseEvent[Any]']] | None = None, ) -> str: from bubus.models import logger @@ -74,8 +74,8 @@ def log_event_tree( # Calculate which is the last item considering both results and unmapped children unmapped_children: list['BaseEvent[Any]'] = [] - if child_events_by_parent: - all_children = child_events_by_parent.get(event.event_id, []) + if event_children_by_parent: + all_children = event_children_by_parent.get(event.event_id, []) for child in all_children: # Will be printed later if not already printed by a handler if child.event_id not in [c.event_id for r in event.event_results.values() for c in r.event_children]: @@ -85,18 +85,18 @@ def log_event_tree( for i, (_handler_id, result) in enumerate(results_sorted): is_last_item = i == total_items - 1 - lines.append(log_eventresult_tree(result, new_indent, is_last_item, child_events_by_parent)) + lines.append(log_eventresult_tree(result, new_indent, is_last_item, event_children_by_parent)) # Track child events printed by this result for child in result.event_children: printed_child_ids.add(child.event_id) # Print unmapped children (those not printed by any handler) - if child_events_by_parent: - children = child_events_by_parent.get(event.event_id, []) + if event_children_by_parent: + children = event_children_by_parent.get(event.event_id, []) for i, child in enumerate(children): if child.event_id not in printed_child_ids: is_last_child = i == len(children) - 1 - lines.append(log_event_tree(child, new_indent, is_last_child, child_events_by_parent)) + lines.append(log_event_tree(child, new_indent, is_last_child, 
event_children_by_parent)) return '\n'.join(lines) @@ -105,7 +105,7 @@ def log_eventresult_tree( result: 'EventResult[Any]', indent: str = '', is_last: bool = True, - child_events_by_parent: dict[str | None, list['BaseEvent[Any]']] | None = None, + event_children_by_parent: dict[str | None, list['BaseEvent[Any]']] | None = None, ) -> str: """Print this result and its child events with proper tree formatting""" @@ -158,7 +158,7 @@ def log_eventresult_tree( if result.event_children: for i, child in enumerate(result.event_children): is_last_child = i == len(result.event_children) - 1 - lines.append(log_event_tree(child, new_indent, is_last_child, child_events_by_parent)) + lines.append(log_event_tree(child, new_indent, is_last_child, event_children_by_parent)) return '\n'.join(lines) diff --git a/bubus/middlewares.py b/bubus/middlewares.py new file mode 100644 index 0000000..c883d6d --- /dev/null +++ b/bubus/middlewares.py @@ -0,0 +1,269 @@ +"""Reusable EventBus middleware helpers.""" + +from __future__ import annotations + +import asyncio +import logging +import sqlite3 +import threading +from pathlib import Path +from typing import Any + +from bubus.logging import log_eventbus_tree +from bubus.models import BaseEvent, EventResult, EventStatus +from bubus.service import EventBus +from bubus.service import EventBusMiddleware as _EventBusMiddleware + +__all__ = [ + 'EventBusMiddleware', + 'WALEventBusMiddleware', + 'LoggerEventBusMiddleware', + 'SQLiteHistoryMirrorMiddleware', +] + +logger = logging.getLogger('bubus.middleware') + +EventBusMiddleware = _EventBusMiddleware + + +class WALEventBusMiddleware(EventBusMiddleware): + """Persist completed events to a JSONL write-ahead log.""" + + def __init__(self, wal_path: Path | str): + self.wal_path = Path(wal_path) + self.wal_path.parent.mkdir(parents=True, exist_ok=True) + self._lock = threading.Lock() + + async def on_event_change(self, eventbus: EventBus, event: BaseEvent[Any], status: EventStatus) -> None: + if status 
!= EventStatus.COMPLETED: + return + try: + event_json = event.model_dump_json() # pyright: ignore[reportUnknownMemberType] + await asyncio.to_thread(self._write_line, event_json + '\n') + except Exception as exc: # pragma: no cover + logger.error('❌ %s Failed to save event %s to WAL: %s', eventbus, event.event_id, exc) + + def _write_line(self, line: str) -> None: + with self._lock: + with self.wal_path.open('a', encoding='utf-8') as fp: + fp.write(line) + + +class LoggerEventBusMiddleware(EventBusMiddleware): + """Log completed events to stdout and optionally to a file.""" + + def __init__(self, log_path: Path | str | None = None): + self.log_path = Path(log_path) if log_path is not None else None + if self.log_path is not None: + self.log_path.parent.mkdir(parents=True, exist_ok=True) + + async def on_event_change(self, eventbus: EventBus, event: BaseEvent[Any], status: EventStatus) -> None: + if status != EventStatus.COMPLETED: + return + + summary = event.event_log_safe_summary() + logger.info('βœ… %s completed event %s', eventbus, summary) + line = f'[{eventbus.name}] {summary}\n' + + if self.log_path is not None: + await asyncio.to_thread(self._write_line, line) + print(line.rstrip('\n'), flush=True) + + if logger.isEnabledFor(logging.DEBUG): + log_eventbus_tree(eventbus) + + def _write_line(self, line: str) -> None: + with self.log_path.open('a', encoding='utf-8') as fp: # type: ignore[union-attr] + fp.write(line) + + +class SQLiteHistoryMirrorMiddleware(EventBusMiddleware): + """Mirror event and handler snapshots into append-only SQLite tables.""" + + def __init__(self, db_path: Path | str): + self.db_path = Path(db_path) + self.db_path.parent.mkdir(parents=True, exist_ok=True) + + self._lock = threading.RLock() + self._conn = sqlite3.connect(self.db_path, check_same_thread=False, isolation_level=None) + self._init_db() + + def __del__(self): + try: + self._conn.close() + except Exception: + pass + + async def on_event_change(self, eventbus: EventBus, 
event: BaseEvent[Any], status: EventStatus) -> None: + event_json = event.model_dump_json() + await asyncio.to_thread( + self._insert_event_snapshot, + eventbus, + event.event_id, + event.event_type, + str(event.event_status), + str(status), + event_json, + ) + + async def on_event_result_change( + self, + eventbus: EventBus, + event: BaseEvent[Any], + event_result: EventResult[Any], + status: EventStatus, + ) -> None: + error_repr = repr(event_result.error) if event_result.error is not None else None + result_repr: str | None = None + if event_result.result is not None and event_result.error is None: + try: + result_repr = repr(event_result.result) + except Exception: + result_repr = '' + + try: + event_result_json = event_result.model_dump_json() + except Exception: + event_result_json = None + + await asyncio.to_thread( + self._insert_event_result_snapshot, + event_result.id, + event_result.event_id, + event_result.handler_id, + event_result.handler_name, + eventbus.id, + eventbus.name, + event.event_type, + event_result.status, + str(status), + result_repr, + error_repr, + event_result_json, + ) + + def _init_db(self) -> None: + with self._lock: + self._conn.execute( + """ + CREATE TABLE IF NOT EXISTS events_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_id TEXT NOT NULL, + event_type TEXT NOT NULL, + event_status TEXT NOT NULL, + eventbus_id TEXT NOT NULL, + eventbus_name TEXT NOT NULL, + phase TEXT, + event_json TEXT NOT NULL, + inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + """ + ) + self._conn.execute( + """ + CREATE TABLE IF NOT EXISTS event_results_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_result_id TEXT NOT NULL, + event_id TEXT NOT NULL, + handler_id TEXT NOT NULL, + handler_name TEXT NOT NULL, + eventbus_id TEXT NOT NULL, + eventbus_name TEXT NOT NULL, + event_type TEXT NOT NULL, + status TEXT NOT NULL, + phase TEXT, + result_repr TEXT, + error_repr TEXT, + event_result_json TEXT, + inserted_at TIMESTAMP DEFAULT 
CURRENT_TIMESTAMP + ) + """ + ) + self._conn.execute('PRAGMA journal_mode=WAL') + self._conn.execute('PRAGMA synchronous=NORMAL') + + def _insert_event_snapshot( + self, + eventbus: EventBus, + event_id: str, + event_type: str, + event_status: str, + phase: str | None, + event_json: str, + ) -> None: + with self._lock: + self._conn.execute( + """ + INSERT INTO events_log ( + event_id, + event_type, + event_status, + eventbus_id, + eventbus_name, + phase, + event_json + ) + VALUES (?, ?, ?, ?, ?, ?, ?) + """, + ( + event_id, + event_type, + event_status, + eventbus.id, + eventbus.name, + phase, + event_json, + ), + ) + self._conn.commit() + + def _insert_event_result_snapshot( + self, + event_result_id: str, + event_id: str, + handler_id: str, + handler_name: str, + eventbus_id: str, + eventbus_name: str, + event_type: str, + status: str, + phase: str | None, + result_repr: str | None, + error_repr: str | None, + event_result_json: str | None, + ) -> None: + with self._lock: + self._conn.execute( + """ + INSERT INTO event_results_log ( + event_result_id, + event_id, + handler_id, + handler_name, + eventbus_id, + eventbus_name, + event_type, + status, + phase, + result_repr, + error_repr, + event_result_json + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
+ """, + ( + event_result_id, + event_id, + handler_id, + handler_name, + eventbus_id, + eventbus_name, + event_type, + status, + phase, + result_repr, + error_repr, + event_result_json, + ), + ) + self._conn.commit() diff --git a/bubus/models.py b/bubus/models.py index 4079e49..ec147dd 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -1,9 +1,12 @@ import asyncio +import contextvars import inspect import logging import os +from collections import deque from collections.abc import Awaitable, Callable, Generator from datetime import UTC, datetime +from enum import StrEnum from typing import TYPE_CHECKING, Annotated, Any, ClassVar, Generic, Literal, Protocol, Self, TypeAlias, cast, runtime_checkable from uuid import UUID @@ -32,6 +35,17 @@ logger.setLevel(BUBUS_LOGGING_LEVEL) +class EventStatus(StrEnum): + """Status of an event or handler in the EventBus lifecycle. + + Using StrEnum ensures backwards compatibility - comparisons like + `status == 'pending'` still work since EventStatus.PENDING == 'pending'. 
+ """ + PENDING = 'pending' + STARTED = 'started' + COMPLETED = 'completed' # errored events are also considered completed + + def validate_event_name(s: str) -> str: assert str(s).isidentifier() and not str(s).startswith('_'), f'Invalid event name: {s}' return str(s) @@ -255,99 +269,160 @@ def event_result_type_serializer(self, value: Any) -> str | None: # Completion signal _event_completed_signal: asyncio.Event | None = PrivateAttr(default=None) + _event_is_complete_flag: bool = PrivateAttr(default=False) + + # Dispatch-time context for ContextVar propagation to handlers + # Captured when dispatch() is called, used when executing handlers via ctx.run() + _event_dispatch_context: contextvars.Context | None = PrivateAttr(default=None) def __hash__(self) -> int: """Make events hashable using their unique event_id""" return hash(self.event_id) def __str__(self) -> str: - """BaseEvent#ab12⏳""" - icon = ( - '⏳' - if self.event_status == 'pending' - else 'βœ…' - if self.event_status == 'completed' - else '❌' - if self.event_status == 'error' - else 'πŸƒ' + """Compact O(1) summary for hot-path logging.""" + completed_signal = self._event_completed_signal + is_complete = self._event_is_complete_flag or ( + completed_signal is not None and completed_signal.is_set() ) - # AuthBus≫DataBusβ–Ά AuthLoginEvent#ab12 ⏳ - return f'{"≫".join(self.event_path[1:] or "?")}β–Ά {self.event_type}#{self.event_id[-4:]} {icon}' - - def __await__(self) -> Generator[Self, Any, Any]: - """Wait for event to complete and return self""" + if is_complete: + icon = 'βœ…' + elif self.event_processed_at is not None: + icon = 'πŸƒ' + else: + icon = '⏳' + + bus_hint = self.event_path[-1] if self.event_path else '?' + return f'{bus_hint}β–Ά {self.event_type}#{self.event_id[-4:]} {icon}' + + def _remove_self_from_queue(self, bus: 'EventBus') -> bool: + """Remove this event from the bus's queue if present. 
Returns True if removed.""" + if bus and bus.event_queue and hasattr(bus.event_queue, '_queue'): + # Access internal deque of asyncio.Queue (implementation detail) + queue = cast(deque[BaseEvent[Any]], bus.event_queue._queue) # type: ignore[attr-defined] + if self in queue: + queue.remove(self) + return True + return False + + def _is_queued_on_any_bus(self, ignore_bus: 'EventBus | None' = None) -> bool: + """ + Check whether this event is currently queued on any live EventBus. - # long descriptive name here really helps make traceback easier to follow - async def wait_for_handlers_to_complete_then_return_event(): - assert self.event_completed_signal is not None + This prevents premature completion when an event has been forwarded to + another bus but that bus hasn't processed it yet. + """ + from bubus.service import EventBus - # If we're inside a handler and this event isn't complete yet, - # we need to process it immediately to avoid deadlock - from bubus.service import EventBus, holds_global_lock, inside_handler_context + for bus in list(EventBus.all_instances): + if not bus: + continue + if self.event_id in getattr(bus, '_processing_event_ids', set()): + if ignore_bus is not None and bus is ignore_bus: + continue + return True + if not bus.event_queue or not hasattr(bus.event_queue, '_queue'): + continue + queue = cast(deque[BaseEvent[Any]], bus.event_queue._queue) # type: ignore[attr-defined] + if self in queue: + return True + return False - if not self.event_completed_signal.is_set() and inside_handler_context.get() and holds_global_lock.get(): - # We're inside a handler and hold the global lock - # Process events until this one completes + async def _process_self_on_all_buses(self) -> None: + """ + Process this specific event on all buses where it's queued. 
- # logger.debug(f'__await__ for {self} - inside handler context, processing child events') + This handles the case where an event is forwarded to multiple buses - + we need to process it on each bus, but we only process THIS event, + not other events in the queues (to avoid overshoot). - # Keep processing events from all buses until this event is complete - max_iterations = 1000 # Prevent infinite loops - iterations = 0 + The loop continues until the event's completion signal is set, which + happens after all handlers on all buses have completed. + """ + from bubus.service import EventBus - try: - while not self.event_completed_signal.is_set() and iterations < max_iterations: - iterations += 1 - processed_any = False - - # Process any queued events on all buses - # Create a list copy to avoid "Set changed size during iteration" error - for bus in list(EventBus.all_instances): - if not bus or not bus.event_queue: - continue - - # Process one event from this bus if available - try: - if bus.event_queue.qsize() > 0: - event = bus.event_queue.get_nowait() - await bus.process_event(event) - bus.event_queue.task_done() - processed_any = True - # Check if the event we're waiting for is now complete - if self.event_completed_signal.is_set(): - break - except asyncio.QueueEmpty: - pass - - # Break out of the loop if event completed after processing - if self.event_completed_signal.is_set(): + max_iterations = 1000 # Prevent infinite loops + iterations = 0 + + # Cache the signal - in async context it will always be created + completed_signal = self.event_completed_signal + assert completed_signal is not None, 'event_completed_signal should exist in async context' + + try: + while not completed_signal.is_set() and iterations < max_iterations: + iterations += 1 + processed_any = False + + # Look for this specific event in all bus queues and process it + for bus in list(EventBus.all_instances): + if not bus or not bus.event_queue: + continue + + # Check if THIS event is in 
this bus's queue + if self._remove_self_from_queue(bus): + # Process only this event on this bus + bus._processing_event_ids.add(self.event_id) + try: + await bus.handle_event(self) + bus.event_queue.task_done() + finally: + bus._processing_event_ids.discard(self.event_id) + processed_any = True + + # Check if we're done after processing + if completed_signal.is_set(): break - if not processed_any: - # No events to process, yield control and check for cancellation - try: - await asyncio.sleep(0) - except asyncio.CancelledError: - raise - except asyncio.CancelledError: - # Handler was cancelled due to timeout, exit cleanly - logger.debug(f'Polling loop cancelled for {self}') - raise + if completed_signal.is_set(): + break - if iterations >= max_iterations: - # logger.error(f'Max iterations reached while waiting for {self}') - pass - else: - # Not in handler context - wait for the event to complete normally - await self.event_completed_signal.wait() + if not processed_any: + # Event not in any queue, yield control and wait + await asyncio.sleep(0) + + except asyncio.CancelledError: + if logger.isEnabledFor(logging.DEBUG): + logger.debug('Polling loop cancelled for %s', self) + raise + + async def _wait_for_completion_inside_handler(self) -> None: + """ + Wait for this event to complete when called from inside a handler. + + Processes this specific event on all buses where it appears (handling + the forwarding case), but doesn't process other events (avoiding overshoot). + """ + await self._process_self_on_all_buses() + + async def _wait_for_completion_outside_handler(self) -> None: + """ + Wait for this event to complete when called from outside a handler. + + Simply waits on the completion signal - the event loop's normal + processing will handle the event. 
+ """ + if self._event_is_complete_flag: + return + assert self.event_completed_signal is not None + await self.event_completed_signal.wait() + + def __await__(self) -> Generator[Self, Any, Any]: + """Wait for event to complete and return self""" + + async def wait_for_handlers_to_complete_then_return_event(): + if self._event_is_complete_flag: + return self + assert self.event_completed_signal is not None + from bubus.service import holds_global_lock, inside_handler_context - # Check if any handlers had errors and raise the first one - # for result in self.event_results.values(): - # if result.error: - # raise result.error + is_inside_handler = inside_handler_context.get() and holds_global_lock.get() + is_not_yet_complete = not self._event_is_complete_flag and not self.event_completed_signal.is_set() + + if is_not_yet_complete and is_inside_handler: + await self._wait_for_completion_inside_handler() + else: + await self._wait_for_completion_outside_handler() - # Return the completed event without raising errors - # Errors should only be raised when explicitly requested via event_result() methods return self return wait_for_handlers_to_complete_then_return_event().__await__() @@ -421,8 +496,15 @@ def event_completed_signal(self) -> asyncio.Event | None: return self._event_completed_signal @property - def event_status(self) -> str: - return 'completed' if self.event_completed_at else 'started' if self.event_started_at else 'pending' + def event_status(self) -> EventStatus: + """Current status of this event in the lifecycle.""" + if self._event_is_complete_flag: + return EventStatus.COMPLETED + if self._event_completed_signal is not None and self._event_completed_signal.is_set(): + return EventStatus.COMPLETED + if self.event_started_at is not None: + return EventStatus.STARTED + return EventStatus.PENDING @property def event_children(self) -> list['BaseEvent[Any]']: @@ -435,27 +517,78 @@ def event_children(self) -> list['BaseEvent[Any]']: @property def 
event_started_at(self) -> datetime | None: """Timestamp when event first started being processed by any handler""" - started_times = [result.started_at for result in self.event_results.values() if result.started_at is not None] - # If no handlers but event was processed, use the processed timestamp - if not started_times and self.event_processed_at: + earliest_started: datetime | None = None + for result in self.event_results.values(): + started_at = result.started_at + if started_at is None: + continue + if earliest_started is None or started_at < earliest_started: + earliest_started = started_at + # If no handlers but event was processed, use the processed timestamp. + if earliest_started is None and self.event_processed_at: return self.event_processed_at - return min(started_times) if started_times else None + return earliest_started @property def event_completed_at(self) -> datetime | None: """Timestamp when event was completed by all handlers""" - # If no handlers at all but event was processed, use the processed timestamp + # If no handlers at all but event was processed, use the processed timestamp. + # This supports manually deserialized/updated events in tests and tooling. 
if not self.event_results and self.event_processed_at: return self.event_processed_at - # All handlers must be done (completed or error) - all_done = all(result.status in ('completed', 'error') for result in self.event_results.values()) - if not all_done: + if not self._event_is_complete_flag and not ( + self._event_completed_signal is not None and self._event_completed_signal.is_set() + ): + # Fast negative path for in-flight events return None - # Return the latest completion time - completed_times = [result.completed_at for result in self.event_results.values() if result.completed_at is not None] - return max(completed_times) if completed_times else self.event_processed_at + if not self.event_results: + return self.event_processed_at + + latest_completed: datetime | None = None + for result in self.event_results.values(): + if result.status not in ('completed', 'error'): + return None + completed_at = result.completed_at + if completed_at is None: + continue + if latest_completed is None or completed_at > latest_completed: + latest_completed = completed_at + return latest_completed or self.event_processed_at + + def event_create_pending_results( + self, + handlers: dict[PythonIdStr, EventHandler], + *, + eventbus: 'EventBus | None' = None, + timeout: float | None = None, + ) -> 'dict[PythonIdStr, EventResult[T_EventResultType]]': + """Ensure EventResult placeholders exist for provided handlers before execution. + + Any stale timing/error data from prior runs is cleared so consumers immediately see a fresh pending state. 
+ """ + pending_results: dict[PythonIdStr, 'EventResult[T_EventResultType]'] = {} + self._event_is_complete_flag = False + for handler_id, handler in handlers.items(): + event_result = self.event_result_update( + handler=handler, + eventbus=eventbus, + status='pending', + ) + # Reset runtime fields so we never reuse stale data + event_result.result = None + event_result.error = None + event_result.started_at = None + event_result.completed_at = None + event_result.status = 'pending' + event_result.timeout = timeout if timeout is not None else self.event_timeout + event_result.result_type = self.event_result_type + pending_results[handler_id] = event_result + + if self.event_completed_signal and not self.event_completed_signal.is_set(): + self.event_processed_at = self.event_processed_at or datetime.now(UTC) + return pending_results @staticmethod def _event_result_is_truthy(event_result: 'EventResult[T_EventResultType]') -> bool: @@ -505,18 +638,18 @@ async def event_results_filtered( } if raise_if_any and error_results: - failing_handler, failing_result = list(error_results.items())[0] # throw first error - original_error = failing_result.error or cast(Any, failing_result.result) - - # Log the handler context information instead of wrapping the exception - logger.debug(f'Event handler {failing_handler}({self}) returned an error -> {original_error}') - - # Re-raise the original exception to preserve its type and structured data - if isinstance(original_error, BaseException): - raise original_error - else: - # Fallback for non-exception errors (shouldn't happen in practice) - raise Exception(str(original_error)) + if len(error_results) == 1: + single_result = next(iter(error_results.values())) + single_error = single_result.error or cast(Any, single_result.result) + if isinstance(single_error, BaseException): + raise single_error + raise Exception(str(single_error)) + + collected_errors = self._collect_handler_errors(include_cancelled=True) + raise ExceptionGroup( + 
f'Event {self.event_type}#{self.event_id[-4:]} had {len(collected_errors)} handler error(s)', + collected_errors, + ) if raise_if_none and not included_results: raise ValueError( @@ -531,6 +664,54 @@ async def event_results_filtered( return event_results_by_handler_id + async def raise_if_errors( + self, + timeout: float | None = None, + include_cancelled: bool = False, + ) -> None: + """ + Raise an ExceptionGroup containing all handler errors for this event. + + This waits for event completion, then aggregates handler failures from + event_results. By default, asyncio.CancelledError entries are ignored. + """ + assert self.event_completed_signal is not None, 'Event cannot be awaited outside of an async context' + await asyncio.wait_for(self.event_completed_signal.wait(), timeout=timeout or self.event_timeout) + + collected_errors = self._collect_handler_errors(include_cancelled=include_cancelled) + + if collected_errors: + raise ExceptionGroup( + f'Event {self.event_type}#{self.event_id[-4:]} had {len(collected_errors)} handler error(s)', + collected_errors, + ) + + def _collect_handler_errors(self, include_cancelled: bool) -> list[Exception]: + """Collect handler errors as Exception instances for aggregation.""" + collected_errors: list[Exception] = [] + for event_result in self.event_results.values(): + original_error = event_result.error + if original_error is None and isinstance(event_result.result, BaseException): + original_error = event_result.result + + if original_error is None: + continue + + if isinstance(original_error, asyncio.CancelledError) and not include_cancelled: + continue + + if isinstance(original_error, Exception): + collected_errors.append(original_error) + continue + + wrapped = RuntimeError( + f'Non-Exception handler error from {event_result.eventbus_name}.{event_result.handler_name}: ' + f'{type(original_error).__name__}: {original_error}' + ) + wrapped.__cause__ = original_error + collected_errors.append(wrapped) + return 
collected_errors + async def event_results_by_handler_id( self, timeout: float | None = None, @@ -682,43 +863,60 @@ def event_result_update( # Update the EventResult with provided kwargs self.event_results[handler_id].update(**kwargs) + if 'timeout' in kwargs: + self.event_results[handler_id].timeout = kwargs['timeout'] + if kwargs.get('status') == 'started' and hasattr(self, 'event_processed_at'): + self.event_processed_at = self.event_processed_at or datetime.now(UTC) # logger.debug( # f'Updated EventResult for handler {handler_id}: status={self.event_results[handler_id].status}, total_results={len(self.event_results)}' # ) # Don't mark complete here - let the EventBus do it after all handlers are done return self.event_results[handler_id] - def event_mark_complete_if_all_handlers_completed(self) -> None: + def event_mark_complete_if_all_handlers_completed(self, current_bus: 'EventBus | None' = None) -> None: """Check if all handlers are done and signal completion""" - if self.event_completed_signal and not self.event_completed_signal.is_set(): - # If there are no results at all, the event is complete - if not self.event_results: - if hasattr(self, 'event_processed_at'): - self.event_processed_at = datetime.now(UTC) - self.event_completed_signal.set() + completed_signal = self._event_completed_signal + if completed_signal is not None and completed_signal.is_set(): + self._event_is_complete_flag = True + return + + # If there are no results at all, the event is complete. + if not self.event_results: + # Even with no local handlers, forwarded copies may still be queued elsewhere. 
+ if self._is_queued_on_any_bus(ignore_bus=current_bus): return - - # Check if all handler results are done - all_handlers_done = all(result.status in ('completed', 'error') for result in self.event_results.values()) - if not all_handlers_done: - # logger.debug( - # f'Event {self} not complete - waiting for handlers: {[r for r in self.event_results.values() if r.status not in ("completed", "error")]}' - # ) - return - - # Recursively check if all child events are also complete if not self.event_are_all_children_complete(): - # incomplete_children = [c for c in self.event_children if c.event_status != 'completed'] - # logger.debug( - # f'Event {self} not complete - waiting for {len(incomplete_children)} child events: {incomplete_children}' - # ) return - - # All handlers and all child events are done if hasattr(self, 'event_processed_at'): self.event_processed_at = datetime.now(UTC) - # logger.debug(f'Event {self} marking complete - all handlers and children done') - self.event_completed_signal.set() + self._event_is_complete_flag = True + if completed_signal is not None: + completed_signal.set() + self._event_dispatch_context = None + return + + # Check if all handler results are done. + for result in self.event_results.values(): + if result.status not in ('completed', 'error'): + return + + # Forwarded events may still be waiting in another bus queue. + # Don't mark complete until all queue copies have been consumed. + if self._is_queued_on_any_bus(ignore_bus=current_bus): + return + + # Recursively check if all child events are also complete + if not self.event_are_all_children_complete(): + return + + # All handlers and all child events are done. 
+ if hasattr(self, 'event_processed_at'): + self.event_processed_at = datetime.now(UTC) + self._event_is_complete_flag = True + if completed_signal is not None: + completed_signal.set() + # Clear dispatch context to avoid memory leaks (it holds references to ContextVars) + self._event_dispatch_context = None def event_are_all_children_complete(self, _visited: set[str] | None = None) -> bool: """Recursively check if all child events and their descendants are complete""" @@ -732,7 +930,8 @@ def event_are_all_children_complete(self, _visited: set[str] | None = None) -> b for child_event in self.event_children: if child_event.event_status != 'completed': - logger.debug(f'Event {self} has incomplete child {child_event}') + if logger.isEnabledFor(logging.DEBUG): + logger.debug('Event %s has incomplete child %s', self, child_event) return False # Recursively check child's children if not child_event.event_are_all_children_complete(_visited): @@ -760,12 +959,12 @@ def event_log_tree( self, indent: str = '', is_last: bool = True, - child_events_by_parent: 'dict[str | None, list[BaseEvent[Any]]] | None' = None, + event_children_by_parent: 'dict[str | None, list[BaseEvent[Any]]] | None' = None, ) -> None: """Print this event and its results with proper tree formatting""" from bubus.logging import log_event_tree - log_event_tree(self, indent, is_last, child_events_by_parent) + log_event_tree(self, indent, is_last, event_children_by_parent) @property def event_bus(self) -> 'EventBus': @@ -792,7 +991,8 @@ def event_bus(self) -> 'EventBus': def attr_name_allowed(key: str) -> bool: - return key in pydantic_builtin_attrs or key in event_builtin_attrs or key.startswith('_') + allowed_unprefixed_attrs = {'raise_if_errors'} + return key in pydantic_builtin_attrs or key in event_builtin_attrs or key.startswith('_') or key in allowed_unprefixed_attrs # PSA: All BaseEvent buil-in attrs and methods must be prefixed with "event_" in order to avoid clashing with data contents (which share a 
namespace with the metadata) @@ -848,6 +1048,11 @@ class EventResult(BaseModel, Generic[T_EventResultType]): # and it would significantly reduce runtime flexibility, e.g. you couldn't define and dispatch arbitrary server-provided event types at runtime event_children: list['BaseEvent[Any]'] = Field(default_factory=list) # pyright: ignore[reportUnknownVariableType] + @field_serializer('result', when_used='json') + def _serialize_result(self, value: T_EventResultType | BaseEvent[Any] | None) -> Any: + """Preserve handler return values when serializing without extra validation.""" + return value + @property def handler_completed_signal(self) -> asyncio.Event | None: """Lazily create asyncio.Event when accessed""" @@ -958,13 +1163,193 @@ def update(self, **kwargs: Any) -> Self: self.handler_completed_signal.set() return self + async def execute( + self, + event: 'BaseEvent[T_EventResultType]', + handler: EventHandler, + *, + eventbus: 'EventBus', + timeout: float | None, + enter_handler_context: Callable[[BaseEvent[Any], str], tuple[Any, Any, Any]] | None = None, + exit_handler_context: Callable[[tuple[Any, Any, Any]], None] | None = None, + format_exception_for_log: Callable[[BaseException], str] | None = None, + ) -> T_EventResultType | BaseEvent[Any] | None: + """Execute the handler and update internal state automatically.""" + + def _default_enter_handler_context(_: BaseEvent[Any], __: str) -> tuple[None, None, None]: + return (None, None, None) + + def _default_exit_handler_context(_: tuple[Any, Any, Any]) -> None: + return None + + def _default_format_exception_for_log(exc: BaseException) -> str: + from traceback import TracebackException + + return ''.join( + TracebackException.from_exception(exc, capture_locals=False).format() + ) + + _enter_handler_context_callable = enter_handler_context or _default_enter_handler_context + _exit_handler_context_callable = exit_handler_context or _default_exit_handler_context + _format_exception_for_log_callable = 
format_exception_for_log or _default_format_exception_for_log + + self.timeout = timeout if timeout is not None else self.timeout or event.event_timeout + self.result_type = event.event_result_type + self.update(status='started') + if hasattr(event, 'event_processed_at'): + event.event_processed_at = event.event_processed_at or datetime.now(UTC) + + monitor_task: asyncio.Task[None] | None = None + handler_task: asyncio.Task[Any] | None = None + + # Use dispatch-time context if available (GitHub issue #20) + # This ensures ContextVars set before dispatch() are accessible in handlers + # Use getattr to handle stub events that may not have this attribute + dispatch_context = getattr(event, '_event_dispatch_context', None) + + async def deadlock_monitor() -> None: + await asyncio.sleep(15.0) + logger.warning( + f'⚠️ {eventbus} handler {self.handler_name}() has been running for >15s on event. Possible slow processing or deadlock.\n' + '(handler could be trying to await its own result or could be blocked by another async task).\n' + f'{self.handler_name}({event})' + ) + + monitor_task = asyncio.create_task( + deadlock_monitor(), name=f'{eventbus}.deadlock_monitor({event}, {self.handler_name}#{self.handler_id[-4:]})' + ) + + # For handlers running in dispatch context, we need to set up internal context vars + # INSIDE that context. Create a wrapper that does setup -> handler -> cleanup. + # This includes holds_global_lock which is set by ReentrantLock in the parent context. 
+ async def async_handler_with_context() -> Any: + """Wrapper that sets up internal context before calling async handler.""" + from bubus.service import holds_global_lock + # Set holds_global_lock since we're running inside a handler that holds the lock + # (ReentrantLock set this in the parent context, but dispatch_context is from before that) + holds_global_lock.set(True) + tokens = _enter_handler_context_callable(event, self.handler_id) + try: + return await handler(event) # type: ignore + finally: + _exit_handler_context_callable(tokens) + + def sync_handler_with_context() -> Any: + """Wrapper that sets up internal context before calling sync handler.""" + from bubus.service import holds_global_lock + holds_global_lock.set(True) + tokens = _enter_handler_context_callable(event, self.handler_id) + try: + return handler(event) # type: ignore[call-arg] # protocol allows _self param but we dont need it because it's already bound + finally: + _exit_handler_context_callable(tokens) + + # If no dispatch context, set up context vars the normal way (outside handler) + if dispatch_context is None: + handler_context_tokens = _enter_handler_context_callable(event, self.handler_id) + else: + handler_context_tokens = None # Will be set inside the wrapper + + try: + if inspect.iscoroutinefunction(handler): + if dispatch_context is not None: + # Run wrapper (which sets internal context) inside dispatch context + handler_task = asyncio.create_task( + async_handler_with_context(), + context=dispatch_context, + ) + else: + handler_task = asyncio.create_task(handler(event)) # type: ignore + handler_return_value: Any = await asyncio.wait_for(handler_task, timeout=self.timeout) + elif inspect.isfunction(handler) or inspect.ismethod(handler): + if dispatch_context is not None: + # Run sync wrapper inside dispatch context + handler_return_value = dispatch_context.run(sync_handler_with_context) + else: + handler_return_value = handler(event) + if isinstance(handler_return_value, 
BaseEvent): + logger.debug( + f'Handler {self.handler_name} returned BaseEvent, not awaiting to avoid circular dependency' + ) + else: + raise ValueError(f'Handler {get_handler_name(handler)} must be a sync or async function, got: {type(handler)}') + + monitor_task.cancel() + self.update(result=handler_return_value) + return self.result + + except asyncio.CancelledError as exc: + if monitor_task: + monitor_task.cancel() + handler_interrupted_error = asyncio.CancelledError( + f'Event handler {self.handler_name}#{self.handler_id[-4:]}({event}) was interrupted because of a parent timeout' + ) + self.update(error=handler_interrupted_error) + raise handler_interrupted_error from exc + + except TimeoutError as exc: + if monitor_task: + monitor_task.cancel() + children = ( + f' and interrupted any processing of {len(event.event_children)} child events' + if event.event_children + else '' + ) + timeout_error = TimeoutError( + f'Event handler {self.handler_name}#{self.handler_id[-4:]}({event}) timed out after {self.timeout}s{children}' + ) + self.update(error=timeout_error) + event.event_cancel_pending_child_processing(timeout_error) + + from bubus.logging import log_timeout_tree + + log_timeout_tree(event, self) + raise timeout_error from exc + + except Exception as exc: + if monitor_task: + monitor_task.cancel() + self.update(error=exc) + + red = '\033[91m' + reset = '\033[0m' + logger.error( + f'❌ {eventbus} Error in event handler {self.handler_name}({event}) -> \n{red}{type(exc).__name__}({exc}){reset}\n{_format_exception_for_log_callable(exc)}', + ) + raise + + finally: + if handler_task and not handler_task.done(): + handler_task.cancel() + try: + await asyncio.wait_for(handler_task, timeout=0.1) + except (asyncio.CancelledError, TimeoutError): + pass + + if monitor_task: + try: + if not monitor_task.done(): + monitor_task.cancel() + await monitor_task + except asyncio.CancelledError: + pass + except Exception: + pass + + # Only exit context if it was set outside the 
wrapper (i.e., no dispatch context) + if handler_context_tokens is not None: + _exit_handler_context_callable(handler_context_tokens) + def log_tree( - self, indent: str = '', is_last: bool = True, child_events_by_parent: dict[str | None, list[BaseEvent[Any]]] | None = None + self, + indent: str = '', + is_last: bool = True, + event_children_by_parent: dict[str | None, list[BaseEvent[Any]]] | None = None, ) -> None: """Print this result and its child events with proper tree formatting""" from bubus.logging import log_eventresult_tree - log_eventresult_tree(self, indent, is_last, child_events_by_parent) + log_eventresult_tree(self, indent, is_last, event_children_by_parent) # Resolve forward references diff --git a/bubus/package-lock.json b/bubus/package-lock.json deleted file mode 100644 index 0966feb..0000000 --- a/bubus/package-lock.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "name": "bubus", - "version": "0.1.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "bubus", - "version": "0.1.0", - "license": "MIT", - "dependencies": { - "uuidv7": "^1.0.0" - }, - "devDependencies": { - "@types/node": "^20.10.0", - "typescript": "^5.3.0" - }, - "engines": { - "node": ">=18.0.0" - } - }, - "node_modules/@types/node": { - "version": "20.19.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.1.tgz", - "integrity": "sha512-jJD50LtlD2dodAEO653i3YF04NWak6jN3ky+Ri3Em3mGR39/glWiboM/IePaRbgwSfqM1TpGXfAg8ohn/4dTgA==", - "dev": true, - "license": "MIT", - "dependencies": { - "undici-types": "~6.21.0" - } - }, - "node_modules/typescript": { - "version": "5.8.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz", - "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", - "dev": true, - "license": "Apache-2.0", - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "node_modules/undici-types": { - 
"version": "6.21.0", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", - "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/uuidv7": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/uuidv7/-/uuidv7-1.0.2.tgz", - "integrity": "sha512-8JQkH4ooXnm1JCIhqTMbtmdnYEn6oKukBxHn1Ic9878jMkL7daTI7anTExfY18VRCX7tcdn5quzvCb6EWrR8PA==", - "license": "Apache-2.0", - "bin": { - "uuidv7": "cli.js" - } - } - } -} diff --git a/bubus/service.py b/bubus/service.py index 72f652e..e77079c 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -6,16 +6,17 @@ import warnings import weakref from collections import defaultdict, deque -from collections.abc import Callable +from collections.abc import Callable, Sequence from contextvars import ContextVar +from datetime import UTC, datetime, timedelta from pathlib import Path from typing import Any, Literal, TypeVar, cast, overload -import anyio # pyright: ignore[reportMissingImports] from uuid_extensions import uuid7str # pyright: ignore[reportMissingImports, reportUnknownVariableType] uuid7str: Callable[[], str] = uuid7str # pyright: ignore +from bubus.event_history import EventHistory from bubus.models import ( BUBUS_LOGGING_LEVEL, AsyncEventHandlerClassMethod, @@ -27,6 +28,8 @@ EventHandlerClassMethod, EventHandlerFunc, EventHandlerMethod, + EventResult, + EventStatus, PythonIdentifierStr, PythonIdStr, T_Event, @@ -47,10 +50,38 @@ class QueueShutDown(Exception): pass -QueueEntryType = TypeVar('QueueEntryType', bound='BaseEvent[Any]') -T_ExpectedEvent = TypeVar('T_ExpectedEvent', bound='BaseEvent[Any]') +QueueEntryType = TypeVar('QueueEntryType', bound=BaseEvent[Any]) +T_ExpectedEvent = TypeVar('T_ExpectedEvent', bound=BaseEvent[Any]) +T_QueryEvent = TypeVar('T_QueryEvent', bound=BaseEvent[Any]) +T_QueryEvent = TypeVar('T_QueryEvent', bound=BaseEvent[Any]) -EventPatternType = 
PythonIdentifierStr | Literal['*'] | type['BaseEvent[Any]'] +EventPatternType = PythonIdentifierStr | Literal['*'] | type[BaseEvent[Any]] + + + +class EventBusMiddleware: + """Hookable lifecycle interface for observing or extending EventBus execution. + + Hooks: + on_event_change(eventbus, event, status): Called on event state transitions + on_event_result_change(eventbus, event, event_result, status): Called on EventResult state transitions + + Status values: EventStatus.PENDING, STARTED, COMPLETED, ERROR + """ + + async def on_event_change( + self, eventbus: 'EventBus', event: BaseEvent[Any], status: EventStatus + ) -> None: + """Called on event state transitions (pending, started, completed, error).""" + + async def on_event_result_change( + self, + eventbus: 'EventBus', + event: BaseEvent[Any], + event_result: EventResult[Any], + status: EventStatus, + ) -> None: + """Called on EventResult state transitions (pending, started, completed, error).""" class CleanShutdownQueue(asyncio.Queue[QueueEntryType]): @@ -135,7 +166,7 @@ def get_nowait(self) -> QueueEntryType: # Context variable to track the current event being processed (for setting event_parent_id from inside a child event) -_current_event_context: ContextVar['BaseEvent[Any] | None'] = ContextVar('current_event', default=None) +_current_event_context: ContextVar[BaseEvent[Any] | None] = ContextVar('current_event', default=None) # Context variable to track if we're inside a handler (for nested event detection) inside_handler_context: ContextVar[bool] = ContextVar('inside_handler', default=False) # Context variable to track if we hold the global lock (for re-entrancy across tasks) @@ -263,24 +294,25 @@ class EventBus: # Class Attributes name: PythonIdentifierStr = 'EventBus' parallel_handlers: bool = False - wal_path: Path | None = None # Runtime State id: UUIDStr = '00000000-0000-0000-0000-000000000000' - handlers: dict[PythonIdStr, list[ContravariantEventHandler['BaseEvent[Any]']]] # collected by .on(, ) - 
event_queue: CleanShutdownQueue['BaseEvent[Any]'] | None - event_history: dict[UUIDStr, 'BaseEvent[Any]'] # collected by .dispatch() + handlers: dict[PythonIdStr, list[ContravariantEventHandler[BaseEvent[Any]]]] + event_queue: CleanShutdownQueue[BaseEvent[Any]] | None + event_history: EventHistory[BaseEvent[Any]] _is_running: bool = False _runloop_task: asyncio.Task[None] | None = None _on_idle: asyncio.Event | None = None + _active_event_ids: set[str] + _processing_event_ids: set[str] def __init__( self, name: PythonIdentifierStr | None = None, - wal_path: Path | str | None = None, parallel_handlers: bool = False, max_history_size: int | None = 50, # Keep only 50 events in history + middlewares: Sequence[EventBusMiddleware] | None = None, ): self.id = uuid7str() self.name = name or f'{self.__class__.__name__}_{self.id[-8:]}' @@ -296,23 +328,10 @@ def __init__( for existing_bus in list(EventBus.all_instances): # Make a list copy to avoid modification during iteration if existing_bus is not self and existing_bus.name == self.name: - # Try to trigger collection of just this object by checking if it's collectable - # First, temporarily remove from WeakSet to see if that was the only reference - EventBus.all_instances.discard(existing_bus) - - # Check if the object is still reachable by creating a new weak reference - # If the object only existed in the WeakSet, it should be unreachable now - try: - # Try to access an attribute to see if the object is still valid - _ = existing_bus.name # This will work if object is still alive - - # Object is still alive with real references, restore to WeakSet - EventBus.all_instances.add(existing_bus) - conflicting_buses.append(existing_bus) - except Exception: - # Object was garbage collected or is invalid (e.g., AttributeError), that's fine - # Don't re-add to WeakSet, let it stay removed - pass + # Since stop() renames buses to _stopped_{id}, any bus with a matching + # user-specified name is either running or never-started - 
both should + # be considered conflicts. This makes name conflict detection deterministic. + conflicting_buses.append(existing_bus) # If we found conflicting buses, auto-generate a unique suffix if conflicting_buses: @@ -329,11 +348,13 @@ def __init__( ) self.event_queue = None - self.event_history = {} + self.event_history = EventHistory() self.handlers = defaultdict(list) self.parallel_handlers = parallel_handlers - self.wal_path = Path(wal_path) if wal_path else None self._on_idle = None + self.middlewares: list[EventBusMiddleware] = list(middlewares or []) + self._active_event_ids = set() + self._processing_event_ids = set() # Memory leak prevention settings self.max_history_size = max_history_size @@ -341,11 +362,6 @@ def __init__( # Register this instance EventBus.all_instances.add(self) - # Instead of registering as normal event handlers, - # these special handlers are just called manually at the end of step - # self.on('*', self._default_log_handler) - # self.on('*', self._default_wal_handler) - def __del__(self): """Auto-cleanup on garbage collection""" # Most cleanup should have been done by the event loop close hook @@ -366,27 +382,74 @@ def __del__(self): def __str__(self) -> str: icon = '🟒' if self._is_running else 'πŸ”΄' - return f'{self.name}{icon}(⏳ {len(self.events_pending or [])} | ▢️ {len(self.events_started or [])} | βœ… {len(self.events_completed or [])} ➑️ {len(self.handlers)} πŸ‘‚)' + queue_size = self.event_queue.qsize() if self.event_queue else 0 + return f'{self.name}{icon}(queue={queue_size} active={len(self._active_event_ids)} history={len(self.event_history)} handlers={len(self.handlers)})' def __repr__(self) -> str: return str(self) + async def _on_event_change(self, event: BaseEvent[Any], status: EventStatus) -> None: + if not self.middlewares: + return + for middleware in self.middlewares: + await middleware.on_event_change(self, event, status) + + async def _on_event_result_change( + self, event: BaseEvent[Any], event_result: 
EventResult[Any], status: EventStatus + ) -> None: + if not self.middlewares: + return + for middleware in self.middlewares: + await middleware.on_event_result_change(self, event, event_result, status) + + @staticmethod + def _is_event_complete_fast(event: BaseEvent[Any]) -> bool: + signal = event._event_completed_signal # pyright: ignore[reportPrivateUsage] + if signal is not None: + return signal.is_set() + if getattr(event, '_event_is_complete_flag', False): # pyright: ignore[reportPrivateUsage] + return True + return event.event_completed_at is not None + + @staticmethod + def _is_event_started_fast(event: BaseEvent[Any]) -> bool: + for result in event.event_results.values(): + if result.started_at is not None or result.status == 'started': + return True + return False + + def _has_inflight_events_fast(self) -> bool: + return bool(self._active_event_ids) + + @staticmethod + def _mark_event_complete_on_all_buses(event: BaseEvent[Any]) -> None: + event_id = event.event_id + for bus in list(EventBus.all_instances): + if bus: + bus._active_event_ids.discard(event_id) + @property - def events_pending(self) -> list['BaseEvent[Any]']: + def events_pending(self) -> list[BaseEvent[Any]]: """Get events that haven't started processing yet (does not include events that have not even finished dispatching yet in self.event_queue)""" return [ - event for event in self.event_history.values() if event.event_started_at is None and event.event_completed_at is None + event + for event in self.event_history.values() + if not self._is_event_complete_fast(event) and not self._is_event_started_fast(event) ] @property - def events_started(self) -> list['BaseEvent[Any]']: + def events_started(self) -> list[BaseEvent[Any]]: """Get events currently being processed""" - return [event for event in self.event_history.values() if event.event_started_at and not event.event_completed_at] + return [ + event + for event in self.event_history.values() + if not self._is_event_complete_fast(event) 
and self._is_event_started_fast(event) + ] @property - def events_completed(self) -> list['BaseEvent[Any]']: + def events_completed(self) -> list[BaseEvent[Any]]: """Get events that have completed processing""" - return [event for event in self.event_history.values() if event.event_completed_at is not None] + return [event for event in self.event_history.values() if self._is_event_complete_fast(event)] # Overloads for typed event patterns with specific handler signatures # Order matters - more specific types must come before general ones @@ -409,11 +472,11 @@ def on(self, event_pattern: EventPatternType, handler: AsyncEventHandlerMethod[T # 5. EventHandlerClassMethod[BaseEvent] - sync classmethod taking cls and event @overload - def on(self, event_pattern: EventPatternType, handler: EventHandlerClassMethod['BaseEvent[Any]']) -> None: ... + def on(self, event_pattern: EventPatternType, handler: EventHandlerClassMethod[BaseEvent[Any]]) -> None: ... # 6. AsyncEventHandlerClassMethod[BaseEvent] - async classmethod taking cls and event @overload - def on(self, event_pattern: EventPatternType, handler: AsyncEventHandlerClassMethod['BaseEvent[Any]']) -> None: ... + def on(self, event_pattern: EventPatternType, handler: AsyncEventHandlerClassMethod[BaseEvent[Any]]) -> None: ... # I dont think this is needed, but leaving it here for now # 9. Coroutine[Any, Any, Any] - direct coroutine @@ -423,13 +486,13 @@ def on(self, event_pattern: EventPatternType, handler: AsyncEventHandlerClassMet def on( self, event_pattern: EventPatternType, - handler: ( # TypeAlias with args doesnt work on overloaded signature, has to be defined inline + handler: ( # TypeAlias with args doesn't work on overloaded signature as of 2025, has to be defined inline! 
EventHandlerFunc[T_Event] - | AsyncEventHandlerFunc['BaseEvent[Any]'] + | AsyncEventHandlerFunc[BaseEvent[Any]] | EventHandlerMethod[T_Event] - | AsyncEventHandlerMethod['BaseEvent[Any]'] - | EventHandlerClassMethod['BaseEvent[Any]'] - | AsyncEventHandlerClassMethod['BaseEvent[Any]'] + | AsyncEventHandlerMethod[BaseEvent[Any]] + | EventHandlerClassMethod[BaseEvent[Any]] + | AsyncEventHandlerClassMethod[BaseEvent[Any]] ), ) -> None: """ @@ -472,7 +535,7 @@ def on( if new_handler_name in existing_registered_handlers: warnings.warn( f"⚠️ {self} Handler {new_handler_name} already registered for event '{event_key}'. " - f'This may cause ambiguous results when using name-based access. ' + f'This may make it difficult to filter event results by handler name. ' f'Consider using unique function names.', UserWarning, stacklevel=2, @@ -480,7 +543,13 @@ def on( # Register handler self.handlers[event_key].append(handler) # type: ignore - logger.debug(f'πŸ‘‚ {self}.on({event_key}, {get_handler_name(handler)}) Registered event handler') + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + 'πŸ‘‚ %s.on(%s, %s) Registered event handler', + self, + event_key, + get_handler_name(handler), + ) def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: """ @@ -517,6 +586,11 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: if current_event is not None: event.event_parent_id = current_event.event_id + # Capture dispatch-time context for propagation to handlers (GitHub issue #20) + # This ensures ContextVars set before dispatch() are accessible in handlers + if event._event_dispatch_context is None: # pyright: ignore[reportPrivateUsage] + event._event_dispatch_context = contextvars.copy_context() # pyright: ignore[reportPrivateUsage] + # Track child events - if we're inside a handler, add this event to the handler's event_children list # Only track if this is a NEW event (not forwarding an existing event) current_handler_id = _current_handler_id_context.get() @@ 
-533,9 +607,13 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: # NOT: event = event.model_copy(update={'event_path': event.event_path + [self.name]}) event.event_path.append(self.name) else: - logger.debug( - f'⚠️ {self}.dispatch({event.event_type}) - Bus already in path, not adding again. Path: {event.event_path}' - ) + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + '⚠️ %s.dispatch(%s) - Bus already in path, not adding again. Path: %s', + self, + event.event_type, + event.event_path, + ) assert event.event_path, 'Missing event.event_path: list[str] (with at least the origin function name recorded in it)' assert all(entry.isidentifier() for entry in event.event_path), ( @@ -546,7 +624,12 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: # Only enforce if we have memory limits set if self.max_history_size is not None: queue_size = self.event_queue.qsize() if self.event_queue else 0 - pending_in_history = sum(1 for e in self.event_history.values() if e.event_status in ('pending', 'started')) + pending_in_history = 0 + for existing_event in self.event_history.values(): + if not self._is_event_complete_fast(existing_event): + pending_in_history += 1 + if queue_size + pending_in_history >= 100: + break total_pending = queue_size + pending_in_history if total_pending >= 100: @@ -565,9 +648,20 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: self.event_queue.put_nowait(event) # Only add to history after successfully queuing self.event_history[event.event_id] = event - logger.info( - f'πŸ—£οΈ {self}.dispatch({event.event_type}) ➑️ {event.event_type}#{event.event_id[-4:]} (#{self.event_queue.qsize()} {event.event_status})' - ) + self._active_event_ids.add(event.event_id) + if self.middlewares: + loop = asyncio.get_running_loop() + loop.create_task(self._on_event_change(event, EventStatus.PENDING)) + if logger.isEnabledFor(logging.INFO): + logger.info( + 'πŸ—£οΈ %s.dispatch(%s) ➑️ %s#%s (#%d %s)', + self, + 
event.event_type, + event.event_type, + event.event_id[-4:], + self.event_queue.qsize(), + event.event_status, + ) except asyncio.QueueFull: # Don't add to history if we can't queue it logger.error( @@ -575,61 +669,223 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: ) raise # could also block indefinitely until queue has space, but dont drop silently or delete events else: - logger.warning(f'⚠️ {self}.dispatch() called but event_queue is None! Event not queued: {event.event_type}') + logger.warning('⚠️ %s.dispatch() called but event_queue is None! Event not queued: %s', self, event.event_type) # Note: We do NOT pre-create EventResults here anymore. # EventResults are created only when handlers actually start executing. # This avoids "orphaned" pending results for handlers that get filtered out later. - # Clean up if over the limit - if self.max_history_size and len(self.event_history) > self.max_history_size: - self.cleanup_event_history() + # Soft cleanup during enqueue to prevent unbounded growth while keeping hot dispatch fast. + if self.max_history_size: + soft_limit = max(self.max_history_size, int(self.max_history_size * 1.2)) + if len(self.event_history) > soft_limit: + self.cleanup_event_history() return event + def _event_matches_pattern(self, event: BaseEvent[Any], pattern: EventPatternType) -> bool: + if pattern == '*': + return True + if isinstance(pattern, str): + return event.event_type == pattern + return isinstance(event, pattern) + + @overload + async def find( + self, + event_type: type[T_ExpectedEvent], + where: Callable[[BaseEvent[Any] | T_ExpectedEvent], bool] = lambda _: True, + child_of: BaseEvent[Any] | None = None, + past: bool | float = True, + future: bool | float = True, + ) -> T_ExpectedEvent | None: ... 
+ + @overload + async def find( + self, + event_type: PythonIdentifierStr, + where: Callable[[BaseEvent[Any]], bool] = lambda _: True, + child_of: BaseEvent[Any] | None = None, + past: bool | float = True, + future: bool | float = True, + ) -> BaseEvent[Any] | None: ... + + async def find( + self, + event_type: PythonIdentifierStr | type[T_ExpectedEvent], + where: Callable[[BaseEvent[Any]], bool] = lambda _: True, + child_of: BaseEvent[Any] | None = None, + past: bool | float = True, + future: bool | float = True, + ) -> BaseEvent[Any] | T_ExpectedEvent | None: + """ + Find an event matching criteria in history and/or future. + + This is a unified method that can search past event_history, wait for future + events, or both. Use this instead of separate query() and expect() calls. + + Args: + event_type: The event type string or model class to find + where: Predicate function for filtering (default: lambda _: True) + child_of: Only match events that are descendants of this parent event + past: Controls history search behavior: + - True: search all history + - False: skip history search + - float: search events from last N seconds only + future: Controls future wait behavior: + - True: wait forever for matching event + - False: don't wait for future events + - float: wait up to N seconds for matching event + + Returns: + Matching event or None if not found/timeout + + Examples: + # Search all history, wait up to 5s for future + event = await bus.find(EventType, past=True, future=5) + + # Search last 5s of history, wait forever + event = await bus.find(EventType, past=5, future=True) + + # Search last 5s of history, wait up to 5s + event = await bus.find(EventType, past=5, future=5) + + # Search all history instantly, don't wait (debouncing) + event = await bus.find(EventType, past=True, future=False) + + # Wait up to 5s for future only (like old expect) + event = await bus.find(EventType, past=False, future=5) + + # Find child event that may have already fired + 
nav_event = await bus.dispatch(NavigateToUrlEvent(...)) + new_tab = await bus.find(TabCreatedEvent, child_of=nav_event, past=True, future=5) + """ + # If neither past nor future, return None immediately + if past is False and future is False: + return None + + # Build combined predicate including child_of check + def matches(event: BaseEvent[Any]) -> bool: + if not where(event): + return False + if child_of is not None and not self.event_is_child_of(event, child_of): + return False + return True + + # Search past history if enabled + if past is not False: + # Calculate cutoff time if past is a float (time window in seconds) + cutoff: datetime | None = None + if past is not True: # past is a float/int specifying time window + cutoff = datetime.now(UTC) - timedelta(seconds=float(past)) + + events = list(self.event_history.values()) + for event in reversed(events): + # Only match completed events in history + if event.event_completed_at is None: + continue + # Skip events older than cutoff (dispatched before the time window) + if cutoff is not None and event.event_created_at < cutoff: + continue + if not self._event_matches_pattern(event, event_type): + continue + if matches(event): + return event + + # If not searching future, return None + if future is False: + return None + + # Wait for future events using expect-like pattern + future_result: asyncio.Future[BaseEvent[Any]] = asyncio.Future() + + def notify_find_handler(event: BaseEvent[Any]) -> None: + """Handler that resolves the future when a matching event is found""" + if not future_result.done() and matches(event): + future_result.set_result(event) + + # Add debugging info to handler name + current_frame = inspect.currentframe() + assert current_frame + notify_find_handler.__name__ = f'{self}.find({event_type}, past={past}, future={future})@{_log_pretty_path(current_frame.f_code.co_filename)}:{current_frame.f_lineno}' + + # Register temporary listener + self.on(event_type, notify_find_handler) + + # Ensure the 
temporary handler runs before user handlers + event_key = event_type.__name__ if isinstance(event_type, type) else str(event_type) + handlers_for_key = self.handlers.get(event_key) + if handlers_for_key and handlers_for_key[-1] is notify_find_handler: + handlers_for_key.insert(0, handlers_for_key.pop()) + + try: + # Wait forever if future is True, otherwise wait up to N seconds + if future is True: + return await future_result + else: + return await asyncio.wait_for(future_result, timeout=float(future)) + except asyncio.TimeoutError: + return None + finally: + # Clean up handler + event_key = event_type.__name__ if isinstance(event_type, type) else str(event_type) + if event_key in self.handlers and notify_find_handler in self.handlers[event_key]: + self.handlers[event_key].remove(notify_find_handler) + @overload async def expect( self, event_type: type[T_ExpectedEvent], - include: Callable[['BaseEvent[Any]' | T_ExpectedEvent], bool] = lambda _: True, - exclude: Callable[['BaseEvent[Any]' | T_ExpectedEvent], bool] = lambda _: False, - predicate: Callable[['BaseEvent[Any]' | T_ExpectedEvent], bool] = lambda _: True, # deprecated, alias for include + include: Callable[[BaseEvent[Any] | T_ExpectedEvent], bool] = lambda _: True, + exclude: Callable[[BaseEvent[Any] | T_ExpectedEvent], bool] = lambda _: False, + predicate: Callable[[BaseEvent[Any] | T_ExpectedEvent], bool] = lambda _: True, timeout: float | None = None, - ) -> T_ExpectedEvent: ... + past: bool | float = False, + child_of: BaseEvent[Any] | None = None, + ) -> T_ExpectedEvent | None: ... 
@overload async def expect( self, event_type: PythonIdentifierStr, - include: Callable[['BaseEvent[Any]'], bool] = lambda _: True, - exclude: Callable[['BaseEvent[Any]'], bool] = lambda _: False, - predicate: Callable[['BaseEvent[Any]'], bool] = lambda _: True, # deprecated, alias for include + include: Callable[[BaseEvent[Any]], bool] = lambda _: True, + exclude: Callable[[BaseEvent[Any]], bool] = lambda _: False, + predicate: Callable[[BaseEvent[Any]], bool] = lambda _: True, timeout: float | None = None, - ) -> 'BaseEvent[Any]': ... + past: bool | float = False, + child_of: BaseEvent[Any] | None = None, + ) -> BaseEvent[Any] | None: ... async def expect( self, event_type: PythonIdentifierStr | type[T_ExpectedEvent], - include: Callable[['BaseEvent[Any]'], bool] = lambda _: True, - exclude: Callable[['BaseEvent[Any]'], bool] = lambda _: False, - predicate: Callable[['BaseEvent[Any]'], bool] = lambda _: True, # deprecated, alias for include + include: Callable[[BaseEvent[Any]], bool] = lambda _: True, + exclude: Callable[[BaseEvent[Any]], bool] = lambda _: False, + predicate: Callable[[BaseEvent[Any]], bool] = lambda _: True, timeout: float | None = None, - ) -> 'BaseEvent[Any]' | T_ExpectedEvent: + past: bool | float = False, + child_of: BaseEvent[Any] | None = None, + ) -> BaseEvent[Any] | T_ExpectedEvent | None: """ Wait for an event matching the given type/pattern with optional filters. + This is a backwards-compatible wrapper around find(). For new code, consider + using find() directly for clearer semantics. 
+ Args: event_type: The event type string or model class to wait for include: Filter function that must return True for the event to match (default: lambda e: True) exclude: Filter function that must return False for the event to match (default: lambda e: False) predicate: Deprecated name, alias for include (default: lambda e: True) timeout: Maximum time to wait in seconds as a float (None = wait forever) + past: Controls history search (default: False): + - True: search all history first + - False: skip history search + - float: search events from last N seconds + child_of: Only match events that are descendants of this parent event Returns: - The first matching event - - Raises: - asyncio.TimeoutError: If timeout is reached before a matching event + The first matching event, or None if no match arrives before the timeout Example: # Wait for any response event @@ -648,38 +904,170 @@ async def expect( exclude=lambda e: e.error_code is not None, timeout=30 ) + + # Search history first, then wait for future + response = await eventbus.expect( + 'ResponseEvent', + past=True, + timeout=30 + ) + + .. deprecated:: + Use find() instead for clearer semantics: + ``await bus.find(EventType, where=..., past=False, future=timeout)`` """ - future: asyncio.Future['BaseEvent[Any]'] = asyncio.Future() + warnings.warn( + 'expect() is deprecated, use find() instead. 
' + 'Example: await bus.find(EventType, where=lambda e: ..., past=False, future=30)', + DeprecationWarning, + stacklevel=2, + ) - # Handle backwards compatibility: merge predicate into include - if predicate is not None: # type: ignore[conditionAlwaysTrue] - original_include = include - include = lambda e, orig=original_include, pred=predicate: orig(e) and pred(e) + # Merge include/exclude/predicate into single where function for find() + def where(event: BaseEvent[Any]) -> bool: + if predicate is not None and not predicate(event): # type: ignore[truthy-function] + return False + if not include(event): + return False + if exclude(event): + return False + return True + + # Map timeout to future parameter: None -> True (wait forever), float -> float (wait N seconds) + future_param: bool | float = True if timeout is None else timeout + + # Delegate to find() + return await self.find( + event_type, + where=where, + child_of=child_of, + past=past, + future=future_param, + ) - def notify_expect_handler(event: 'BaseEvent[Any]') -> None: - """Handler that resolves the future when a matching event is found""" - if not future.done() and include(event) and not exclude(event): - future.set_result(event) + @overload + async def query( + self, + event_type: type[T_QueryEvent], + include: Callable[[BaseEvent[Any] | T_QueryEvent], bool] = lambda _: True, + exclude: Callable[[BaseEvent[Any] | T_QueryEvent], bool] = lambda _: False, + predicate: Callable[[BaseEvent[Any] | T_QueryEvent], bool] = lambda _: True, + since: timedelta | float | int | None = None, + ) -> T_QueryEvent | None: ... 
- # make debugging otherwise ephemeral async expect handlers easier by including some metadata in the stacktrace func names - current_frame = inspect.currentframe() - assert current_frame - notify_expect_handler.__name__ = f'{self}.expect({event_type}, timeout={timeout})@{_log_pretty_path(current_frame.f_code.co_filename)}:{current_frame.f_lineno}' # add file and line number to the name + @overload + async def query( + self, + event_type: PythonIdentifierStr | Literal['*'], + include: Callable[[BaseEvent[Any]], bool] = lambda _: True, + exclude: Callable[[BaseEvent[Any]], bool] = lambda _: False, + predicate: Callable[[BaseEvent[Any]], bool] = lambda _: True, + since: timedelta | float | int | None = None, + ) -> BaseEvent[Any] | None: ... + + async def query( + self, + event_type: PythonIdentifierStr | Literal['*'] | type[T_QueryEvent], + include: Callable[[BaseEvent[Any]], bool] = lambda _: True, + exclude: Callable[[BaseEvent[Any]], bool] = lambda _: False, + predicate: Callable[[BaseEvent[Any]], bool] = lambda _: True, + since: timedelta | float | int | None = None, + ) -> BaseEvent[Any] | T_QueryEvent | None: + """ + Return the most recent completed event matching the filters, or None if not found. - # Register temporary listener that watches for matching events and triggers the expect handler - self.on(event_type, notify_expect_handler) + This is a convenience wrapper around find() for searching history only. 
- try: - # Wait for the future with optional timeout - if timeout is not None: - return await asyncio.wait_for(future, timeout=timeout) - else: - return await future - finally: - # Clean up handler - event_key: str = event_type.__name__ if isinstance(event_type, type) else str(event_type) # pyright: ignore[reportUnknownMemberType, reportPartialTypeErrors] - if event_key in self.handlers and notify_expect_handler in self.handlers[event_key]: - self.handlers[event_key].remove(notify_expect_handler) + Args: + event_type: The event type string or model class to find + include: Filter function that must return True for the event to match + exclude: Filter function that must return False for the event to match + predicate: Deprecated alias for include + since: Only search events from the last N seconds (timedelta, float, or int) + + Returns: + The most recent matching event, or None if not found + + .. deprecated:: + Use find() instead for clearer semantics: + ``await bus.find(EventType, where=..., past=since, future=False)`` + """ + warnings.warn( + 'query() is deprecated, use find() instead. 
' + 'Example: await bus.find(EventType, where=lambda e: ..., past=True, future=False)', + DeprecationWarning, + stacklevel=2, + ) + + # Merge include/exclude/predicate into single where function + def where(event: BaseEvent[Any]) -> bool: + if predicate is not None and not predicate(event): # type: ignore[truthy-function] + return False + if not include(event): + return False + if exclude(event): + return False + return True + + # Convert since to past parameter for find() + past_param: bool | float + if since is None: + past_param = True # Search all history + elif isinstance(since, timedelta): + if since < timedelta(0): + raise ValueError('since must be non-negative') + past_param = since.total_seconds() + else: + if since < 0: + raise ValueError('since must be non-negative') + past_param = float(since) + + # Delegate to find() with future=False (no waiting) + return await self.find( + event_type, + where=where, + past=past_param, + future=False, + ) + + def event_is_child_of(self, event: BaseEvent[Any], ancestor: BaseEvent[Any]) -> bool: + """ + Check if event is a descendant of ancestor (child, grandchild, etc.). + + Walks up the parent chain from event looking for ancestor. + Returns True if ancestor is found in the chain, False otherwise. 
+ + Args: + event: The potential descendant event + ancestor: The potential ancestor event + + Returns: + True if event is a descendant of ancestor, False otherwise + """ + current_id = event.event_parent_id + visited: set[str] = set() + + while current_id and current_id not in visited: + if current_id == ancestor.event_id: + return True + visited.add(current_id) + + # Find parent event in any bus's history + parent = self.event_history.get(current_id) + if parent is None: + # Check other buses + for bus in list(EventBus.all_instances): + if bus is not self and current_id in bus.event_history: + parent = bus.event_history[current_id] + break + if parent is None: + break + current_id = parent.event_parent_id + + return False + + def event_is_parent_of(self, event: BaseEvent[Any], descendant: BaseEvent[Any]) -> bool: + return self.event_is_child_of(descendant, event) def _start(self) -> None: """Start the event bus if not already running""" @@ -728,12 +1116,17 @@ def close_with_cleanup() -> None: if self.event_queue is None: # Set queue size based on whether we have limits queue_size = 50 if self.max_history_size is not None else 0 # 0 = unlimited - self.event_queue = CleanShutdownQueue['BaseEvent[Any]'](maxsize=queue_size) + self.event_queue = CleanShutdownQueue[BaseEvent[Any]](maxsize=queue_size) self._on_idle = asyncio.Event() self._on_idle.clear() # Start in a busy state unless we confirm queue is empty by running step() at least once - # Create and start the run loop task - self._runloop_task = loop.create_task(self._run_loop(), name=f'{self}._run_loop') + # Create and start the run loop task. + # Use a weakref-based runner so an unreferenced EventBus can be GC'd + # without requiring explicit stop(clear=True) by callers. 
+ self._runloop_task = loop.create_task( + EventBus._run_loop_weak(weakref.ref(self)), + name=f'{self}._run_loop', + ) self._is_running = True except RuntimeError: # No event loop - will start when one becomes available @@ -758,10 +1151,14 @@ async def stop(self, timeout: float | None = None, clear: bool = False) -> None: pass queue_size = self.event_queue.qsize() if self.event_queue else 0 - if queue_size or self.events_pending or self.events_started: + has_inflight = self._has_inflight_events_fast() + if queue_size or has_inflight: logger.debug( - f'⚠️ {self} stopping with pending events: Pending {len(self.events_pending) + queue_size} | Started {len(self.events_started)} | Completed {len(self.events_completed)}\n' - f'PENDING={str(self.events_pending)[:500]}\nSTARTED={str(self.events_started)[:500]}' + '⚠️ %s stopping with pending events: queue=%d inflight=%s history=%d', + self, + queue_size, + has_inflight, + len(self.event_history), ) # Signal shutdown @@ -783,13 +1180,22 @@ async def stop(self, timeout: float | None = None, clear: bool = False) -> None: # Clear references self._runloop_task = None + self._active_event_ids.clear() + self._processing_event_ids.clear() if self._on_idle: self._on_idle.set() + # Rename the bus to release the name. This ensures stopped buses don't + # cause name conflicts with new buses using the same name. This makes + # name conflict detection deterministic (not dependent on GC timing). 
+ self.name = f'_stopped_{self.id[-8:]}' + # Clear event history and handlers if requested (for memory cleanup) if clear: self.event_history.clear() self.handlers.clear() + self._active_event_ids.clear() + # Remove from global instance tracking if self in EventBus.all_instances: EventBus.all_instances.discard(self) @@ -803,9 +1209,9 @@ async def stop(self, timeout: float | None = None, clear: bool = False) -> None: # No running loop, that's fine pass - logger.debug(f'🧹 {self} cleared event history and removed from global tracking') + logger.debug('🧹 %s cleared event history and removed from global tracking', self) - logger.debug(f'πŸ›‘ {self} shut down gracefully' if timeout is not None else f'πŸ›‘ {self} killed') + logger.debug('πŸ›‘ %s shut down %s', self, 'gracefully' if timeout is not None else 'immediately') # Check total memory usage across all instances try: @@ -843,7 +1249,7 @@ async def wait_until_idle(self, timeout: float | None = None) -> None: await asyncio.sleep(0) # Yield to event loop # Double-check we're truly idle - if new events came in, wait again - while not self._on_idle.is_set() or self.events_started or self.events_pending: + while not self._on_idle.is_set() or self._has_inflight_events_fast(): if timeout is not None: elapsed = asyncio.get_event_loop().time() - start_time remaining_timeout = max(0, timeout - elapsed) @@ -858,7 +1264,10 @@ async def wait_until_idle(self, timeout: float | None = None) -> None: except TimeoutError: logger.warning( - f'βŒ›οΈ {self} Timeout waiting for event bus to be idle after {timeout}s (processing: {len(self.events_started)})' + 'βŒ›οΈ %s Timeout waiting for event bus to be idle after %ss (history=%d)', + self, + timeout, + len(self.event_history), ) async def _run_loop(self) -> None: @@ -869,7 +1278,7 @@ async def _run_loop(self) -> None: _processed_event = await self.step() # Check if we should set idle state after processing if self._on_idle and self.event_queue: - if not (self.events_pending or 
self.events_started or self.event_queue.qsize()): + if not self._has_inflight_events_fast() and self.event_queue.qsize() == 0: self._on_idle.set() except QueueShutDown: # Queue was shut down, exit cleanly @@ -879,10 +1288,10 @@ async def _run_loop(self) -> None: if 'Event loop is closed' in str(e) or 'no running event loop' in str(e): break else: - logger.exception(f'❌ {self} Runtime error in event loop: {type(e).__name__} {e}', exc_info=True) + logger.exception('❌ %s Runtime error in event loop: %s %s', self, type(e).__name__, e, exc_info=True) # Continue running even if there's an error except Exception as e: - logger.exception(f'❌ {self} Error in event loop: {type(e).__name__} {e}', exc_info=True) + logger.exception('❌ %s Error in event loop: %s %s', self, type(e).__name__, e, exc_info=True) # Continue running even if there's an error except asyncio.CancelledError: # Task was cancelled, clean exit @@ -892,6 +1301,101 @@ async def _run_loop(self) -> None: # Don't call stop() here as it might create new tasks self._is_running = False + @staticmethod + async def _run_loop_weak(bus_ref: 'weakref.ReferenceType[EventBus]') -> None: + """ + Weakref-based run loop. + + Unlike a bound coroutine (self._run_loop), this runner avoids holding a + strong EventBus reference while idle, allowing unreferenced buses to be + garbage-collected naturally without an explicit stop(). 
+ """ + try: + while True: + bus = bus_ref() + if bus is None or not bus._is_running: + break + + queue = bus.event_queue + on_idle = bus._on_idle + del bus + + if queue is None or on_idle is None: + await asyncio.sleep(0.01) + continue + + event: BaseEvent[Any] | None = None + try: + get_next_queued_event = asyncio.create_task(queue.get()) + if hasattr(get_next_queued_event, '_log_destroy_pending'): + get_next_queued_event._log_destroy_pending = False # type: ignore[attr-defined] + has_next_event, _pending = await asyncio.wait({get_next_queued_event}, timeout=0.1) + if not has_next_event: + get_next_queued_event.cancel() + bus = bus_ref() + if bus is None: + break + if bus._on_idle and bus.event_queue: + if not bus._has_inflight_events_fast() and bus.event_queue.qsize() == 0: + bus._on_idle.set() + del bus + continue + + event = await get_next_queued_event + except QueueShutDown: + break + except asyncio.CancelledError: + break + except RuntimeError as e: + if 'Event loop is closed' in str(e) or 'no running event loop' in str(e): + break + logger.exception(f'❌ Weak run loop runtime error: {type(e).__name__} {e}', exc_info=True) + continue + except Exception as e: + logger.exception(f'❌ Weak run loop error: {type(e).__name__} {e}', exc_info=True) + continue + + bus = bus_ref() + if bus is None: + try: + queue.task_done() + except Exception: + pass + break + + try: + if bus._on_idle: + bus._on_idle.clear() + + if event is not None: + bus._processing_event_ids.add(event.event_id) + async with _get_global_lock(): + if event is not None: + await bus.handle_event(event) + queue.task_done() + + if bus._on_idle and bus.event_queue: + if not bus._has_inflight_events_fast() and bus.event_queue.qsize() == 0: + bus._on_idle.set() + except QueueShutDown: + break + except asyncio.CancelledError: + break + except RuntimeError as e: + if 'Event loop is closed' in str(e) or 'no running event loop' in str(e): + break + logger.exception(f'❌ Weak run loop runtime error: 
{type(e).__name__} {e}', exc_info=True) + except Exception as e: + logger.exception(f'❌ Weak run loop error: {type(e).__name__} {e}', exc_info=True) + finally: + if event is not None: + bus._processing_event_ids.discard(event.event_id) + del bus + finally: + bus = bus_ref() + if bus is not None: + bus._is_running = False + async def _get_next_event(self, wait_for_timeout: float = 0.1) -> 'BaseEvent[Any] | None': """Get the next event from the queue""" @@ -918,7 +1422,7 @@ async def _get_next_event(self, wait_for_timeout: float = 0.1) -> 'BaseEvent[Any get_next_queued_event.cancel() # Check if we're idle, if so, set the idle flag - if not (self.events_pending or self.events_started or self.event_queue.qsize()): + if not self._has_inflight_events_fast() and self.event_queue.qsize() == 0: self._on_idle.set() return None @@ -929,7 +1433,40 @@ async def _get_next_event(self, wait_for_timeout: float = 0.1) -> 'BaseEvent[Any async def step( self, event: 'BaseEvent[Any] | None' = None, timeout: float | None = None, wait_for_timeout: float = 0.1 ) -> 'BaseEvent[Any] | None': - """Process a single event from the queue""" + """ + Consume and process a single event from the queue (one iteration of the run loop). + + This is the high-level "consumer" method that: + 1. Dequeues the next event (or uses one passed in) + 2. Acquires the global processing lock + 3. Calls handle_event() to execute handlers + 4. Marks the queue task as done (only if event came from queue) + 5. Manages idle state signaling + + Use this method when manually driving the event loop (e.g., in tests). + For automatic processing, use dispatch() which queues events for the run loop. 
+ + Args: + event: Optional event to process directly (bypasses queue if provided) + timeout: Handler execution timeout in seconds + wait_for_timeout: How long to wait for next event from queue (default: 0.1s) + + Returns: + The processed event, or None if queue was empty/shutdown + + Warning: + Passing an event directly (bypassing the queue) is for advanced use only, be aware if: + + - **Event not in queue**: Works fine, handlers execute normally. + - **Event already completed**: Handlers will run AGAIN, overwriting previous + results. No guard against double-processing. + - **Event in queue but not next**: Event processes immediately, but STAYS + in queue. The run loop will process it again later (double-processing). + + See Also: + dispatch: Queues an event for normal async processing by the bus's existing run loop (recommended) + handle_event: Lower-level method that executes handlers (called by step) + """ assert self._on_idle and self.event_queue, 'EventBus._start() must be called before step()' # Track if we got the event from the queue @@ -942,44 +1479,81 @@ async def step( if event is None: return None - logger.debug(f'πŸƒ {self}.step({event}) STARTING') + if logger.isEnabledFor(logging.DEBUG): + logger.debug('πŸƒ %s.step(%s) STARTING', self, event) # Clear idle state when we get an event self._on_idle.clear() # Always acquire the global lock (it's re-entrant across tasks) - async with _get_global_lock(): - # Process the event - await self.process_event(event, timeout=timeout) + self._processing_event_ids.add(event.event_id) + try: + async with _get_global_lock(): + # Process the event + await self.handle_event(event, timeout=timeout) - # Mark task as done only if we got it from the queue - if from_queue: - self.event_queue.task_done() + # Mark task as done only if we got it from the queue + if from_queue: + self.event_queue.task_done() + finally: + self._processing_event_ids.discard(event.event_id) - logger.debug(f'βœ… {self}.step({event}) COMPLETE') + 
if logger.isEnabledFor(logging.DEBUG): + logger.debug('βœ… %s.step(%s) COMPLETE', self, event) return event - async def process_event(self, event: 'BaseEvent[Any]', timeout: float | None = None) -> None: - """Process a single event (assumes lock is already held)""" + async def handle_event(self, event: BaseEvent[Any], timeout: float | None = None) -> None: + """ + Execute all applicable handlers for an event (low-level, assumes lock is held). + + This is the core event handling method that: + 1. Finds all applicable handlers (type-specific + wildcard) + 2. Creates pending EventResult placeholders + 3. Executes handlers (serially or in parallel based on bus config) + 4. Marks the event as complete when all handlers finish + 5. Propagates completion status up the parent event chain + 6. Cleans up event history if over size limit + + IMPORTANT: This method assumes the global processing lock is already held. + For safe external use, call step() instead which handles locking. + + Args: + event: The event to handle + timeout: Handler execution timeout in seconds (defaults to event.event_timeout) + + Warning: + This is a low-level method with no safety guards. Behavior in edge cases: + + - **Event not in queue**: Works fine, handlers execute normally. This method + does not interact with the queue at all. + - **Event already completed**: Handlers run AGAIN, ``event_create_pending_results()`` + overwrites previous results. No guard against double-processing. + - **Event in queue but not next**: Works fine for this call, but event stays + in queue and will be processed again later by the run loop. + - **Another event being processed (lock held elsewhere)**: If called without + holding the lock, concurrent handler execution may cause race conditions. + If called from within a handler (lock is re-entrant), causes nested processing. + - **This exact event already being processed**: Recursive/re-entrant processing. 
+ Handlers run again while already running, results overwritten mid-execution. + Likely to cause undefined behavior. + + See Also: + step: High-level method that acquires lock and calls handle_event + dispatch: Queues an event for async processing (recommended) + """ # Get applicable handlers applicable_handlers = self._get_applicable_handlers(event) - # Create pending EventResults for all applicable handlers before execution - # This ensures the event knows it has handlers and won't mark itself complete prematurely - for handler_id, handler in applicable_handlers.items(): - if handler_id not in event.event_results: - event.event_result_update( - handler=handler, eventbus=self, status='pending', timeout=timeout or event.event_timeout - ) - # Execute handlers await self._execute_handlers(event, handlers=applicable_handlers, timeout=timeout) - await self._default_log_handler(event) - await self._default_wal_handler(event) - - # Mark event as complete if all handlers are done - event.event_mark_complete_if_all_handlers_completed() + # Mark event as complete and emit change if it just completed + was_complete = self._is_event_complete_fast(event) + event.event_mark_complete_if_all_handlers_completed(current_bus=self) + just_completed = (not was_complete) and self._is_event_complete_fast(event) + if just_completed: + self._mark_event_complete_on_all_buses(event) + await self._on_event_change(event, EventStatus.COMPLETED) # After processing this event, check if any parent events can now be marked complete # We do this by walking up the parent chain @@ -991,27 +1565,34 @@ async def process_event(self, event: 'BaseEvent[Any]', timeout: float | None = N # Find parent event in any bus's history parent_event = None + parent_bus: EventBus | None = None # Create a list copy to avoid "Set changed size during iteration" error for bus in list(EventBus.all_instances): if bus and current.event_parent_id in bus.event_history: parent_event = bus.event_history[current.event_parent_id] 
+ parent_bus = bus break if not parent_event: break # Check if parent can be marked complete - if parent_event.event_completed_signal and not parent_event.event_completed_signal.is_set(): - parent_event.event_mark_complete_if_all_handlers_completed() + was_complete = self._is_event_complete_fast(parent_event) + if not was_complete: + parent_event.event_mark_complete_if_all_handlers_completed(current_bus=parent_bus) + just_completed = (not was_complete) and self._is_event_complete_fast(parent_event) + if parent_bus and just_completed: + self._mark_event_complete_on_all_buses(parent_event) + await parent_bus._on_event_change(parent_event, EventStatus.COMPLETED) # Move up the chain current = parent_event # Clean up excess events to prevent memory leaks - if self.max_history_size: + if self.max_history_size and len(self.event_history) > self.max_history_size: self.cleanup_event_history() - def _get_applicable_handlers(self, event: 'BaseEvent[Any]') -> dict[str, EventHandler]: + def _get_applicable_handlers(self, event: BaseEvent[Any]) -> dict[str, EventHandler]: """Get all handlers that should process the given event, filtering out those that would create loops""" applicable_handlers: list[EventHandler] = [] @@ -1034,30 +1615,49 @@ def _get_applicable_handlers(self, event: 'BaseEvent[Any]') -> dict[str, EventHa return filtered_handlers + def _enter_handler_execution_context( + self, event: BaseEvent[Any], handler_id: str + ) -> tuple[contextvars.Token[Any], contextvars.Token[bool], contextvars.Token[str | None]]: + event_token = _current_event_context.set(event) + inside_handler_token = inside_handler_context.set(True) + current_handler_token = _current_handler_id_context.set(handler_id) + return event_token, inside_handler_token, current_handler_token + + def _exit_handler_execution_context( + self, + handler_context_tokens: tuple[contextvars.Token[Any], contextvars.Token[bool], contextvars.Token[str | None]], + ) -> None: + event_token, inside_handler_token, 
current_handler_token = handler_context_tokens + _current_event_context.reset(event_token) + inside_handler_context.reset(inside_handler_token) + _current_handler_id_context.reset(current_handler_token) + async def _execute_handlers( - self, event: 'BaseEvent[Any]', handlers: dict[PythonIdStr, EventHandler] | None = None, timeout: float | None = None + self, + event: BaseEvent[Any], + handlers: dict[PythonIdStr, EventHandler] | None = None, + timeout: float | None = None, ) -> None: """Execute all handlers for an event in parallel""" applicable_handlers = handlers if (handlers is not None) else self._get_applicable_handlers(event) if not applicable_handlers: - event.event_mark_complete_if_all_handlers_completed() # mark event completed immediately if it has no handlers - return + return # handle_event will mark complete + + pending_results = event.event_create_pending_results( + applicable_handlers, eventbus=self, timeout=timeout or event.event_timeout + ) + if self.middlewares: + for pending_result in pending_results.values(): + await self._on_event_result_change(event, pending_result, EventStatus.PENDING) # Execute all handlers in parallel if self.parallel_handlers: - handler_tasks: dict[PythonIdStr, tuple[asyncio.Task[Any], EventHandler]] = {} - # Copy the current context to ensure context vars are propagated - context = contextvars.copy_context() - for handler_id, handler in applicable_handlers.items(): - task = asyncio.create_task( - self.execute_handler(event, handler, timeout=timeout), - name=f'{self}.execute_handler({event}, {get_handler_name(handler)})', - context=context, - ) - handler_tasks[handler_id] = (task, handler) + handler_tasks: list[asyncio.Task[Any]] = [] + for handler in applicable_handlers.values(): + handler_tasks.append(asyncio.create_task(self.execute_handler(event, handler, timeout=timeout))) - # Wait for all handlers to complete - for handler_id, (task, handler) in handler_tasks.items(): + # Wait for all handlers to complete. 
+ for task in handler_tasks: try: await task except Exception: @@ -1070,167 +1670,96 @@ async def _execute_handlers( await self.execute_handler(event, handler, timeout=timeout) except Exception as e: # Error already logged and recorded in execute_handler - logger.debug( - f'❌ {self} Handler {get_handler_name(handler)}#{str(id(handler))[-4:]}({event}) failed with {type(e).__name__}: {e}' - ) + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + '❌ %s Handler %s#%s(%s) failed with %s: %s', + self, + get_handler_name(handler), + str(id(handler))[-4:], + event, + type(e).__name__, + e, + ) pass # print('FINSIHED EXECUTING ALL HANDLERS') async def execute_handler( - self, event: 'BaseEvent[T_EventResultType]', handler: EventHandler, timeout: float | None = None + self, + event: 'BaseEvent[T_EventResultType]', + handler: EventHandler, + timeout: float | None = None, ) -> Any: - """Safely execute a single handler with deadlock detection""" + """Safely execute a single handler with middleware support and EventResult orchestration.""" - # Check if this handler has already been executed for this event handler_id = get_handler_id(handler, self) + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + ' ↳ %s.execute_handler(%s, handler=%s#%s)', + self, + event, + get_handler_name(handler), + handler_id[-4:], + ) - logger.debug(f' ↳ {self}.execute_handler({event}, handler={get_handler_name(handler)}#{handler_id[-4:]})') - if handler_id in event.event_results: - existing_result = event.event_results[handler_id] - if existing_result.started_at is not None: - raise RuntimeError( - f'Handler {get_handler_name(handler)}#{handler_id[-4:]} has already been executed for event {event.event_id}. 
' - f'Previous execution started at {existing_result.started_at}' + if handler_id not in event.event_results: + new_results = event.event_create_pending_results( + {handler_id: handler}, eventbus=self, timeout=timeout or event.event_timeout + ) + for pending_result in new_results.values(): + await self._on_event_result_change( + event, pending_result, EventStatus.PENDING ) - # Mark handler as started - event_result = event.event_result_update( - handler=handler, eventbus=self, status='started', timeout=timeout or event.event_timeout - ) + event_result = event.event_results[handler_id] - # Set the current event in context so child events can reference it - token = _current_event_context.set(event) - # Mark that we're inside a handler - handler_token = inside_handler_context.set(True) - # Set the current handler ID so child events can be tracked - handler_id_token = _current_handler_id_context.set(handler_id) + # Check if this is the first handler to start (before updating status) + is_first_handler = not any(r.started_at for r in event.event_results.values()) - # Create a task to monitor for potential deadlock / slow handlers - async def deadlock_monitor(): - await asyncio.sleep(15.0) - logger.warning( - f'⚠️ {self} handler {get_handler_name(handler)}() has been running for >15s on event. 
Possible slow processing or deadlock.\n' - '(handler could be trying to await its own result or could be blocked by another async task).\n' - f'{get_handler_name(handler)}({event})' - ) + event_result.update(status='started', timeout=timeout or event.event_timeout) + await self._on_event_result_change(event, event_result, EventStatus.STARTED) - monitor_task = asyncio.create_task( - deadlock_monitor(), name=f'{self}.deadlock_monitor({event}, {get_handler_name(handler)}#{handler_id[-4:]})' - ) + # Emit event STARTED once (when first handler starts) + if is_first_handler: + await self._on_event_change(event, EventStatus.STARTED) - handler_task = None try: - if inspect.iscoroutinefunction(handler): - # Create a task for the handler so we can properly cancel it on timeout - handler_task = asyncio.create_task(handler(event)) # type: ignore - # This allows us to process child events when the handler awaits them - result_value: Any = await asyncio.wait_for(handler_task, timeout=event_result.timeout) - elif inspect.isfunction(handler) or inspect.ismethod(handler): - # If handler function is sync function, run it directly in the main thread - # This blocks but ensures we have access to the event loop, dont run it in a subthread! - result_value: Any = handler(event) - - # If the sync handler returned a BaseEvent (from dispatch), DON'T await it - # For forwarding handlers like bus.on('*', other_bus.dispatch), the handler - # has already queued the event on the target bus. The event will be tracked - # as a child event automatically. - if isinstance(result_value, BaseEvent): - logger.debug( - f'Handler {get_handler_name(handler)} returned BaseEvent, not awaiting to avoid circular dependency' - ) - else: - raise ValueError(f'Handler {get_handler_name(handler)} must be a sync or async function, got: {type(handler)}') - - logger.debug( - f' ↳ Handler {get_handler_name(handler)}#{handler_id[-4:]} returned: {type(result_value).__name__} {str(result_value)[:26]}...' 
# pyright: ignore + result_value = await event_result.execute( + event, + handler, + eventbus=self, + timeout=timeout or event.event_timeout, + enter_handler_context=self._enter_handler_execution_context, + exit_handler_context=self._exit_handler_execution_context, + format_exception_for_log=_log_filtered_traceback, ) - # Cancel the monitor task since handler completed successfully - monitor_task.cancel() - - # Record successful result - event.event_result_update(handler=handler, eventbus=self, result=result_value) - if handler_id in event.event_results: - # logger.debug( - # f' ↳ Updated result for {get_handler_name(handler)}#{handler_id[-4:]}: {event.event_results[handler_id].status}' - # ) - pass - else: - logger.error(f' ↳ ERROR: Result not found for {get_handler_name(handler)}#{handler_id[-4:]} after update!') - return cast(T_EventResultType, result_value) - except asyncio.CancelledError as e: - # Cancel the monitor task on timeout too - monitor_task.cancel() + result_type_name = type(result_value).__name__ if result_value is not None else 'None' + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + ' ↳ Handler %s#%s returned: %s', + get_handler_name(handler), + handler_id[-4:], + result_type_name, + ) - # Create a RuntimeError for timeout - # TODO: figure out why it breaks when we try to switch to InterruptedError instead of asyncio.CancelledError - handler_interrupted_error = asyncio.CancelledError( - f'Event handler {get_handler_name(handler)}#{handler_id[-4:]}({event}) was interrupted because of a parent timeout' + await self._on_event_result_change( + event, event_result, EventStatus.COMPLETED ) - event.event_result_update(handler=handler, eventbus=self, error=handler_interrupted_error) - - # import ipdb; ipdb.set_trace() - raise handler_interrupted_error from e - - except TimeoutError as e: - # Cancel the monitor task on timeout too - monitor_task.cancel() + return cast(T_EventResultType, result_value) - # Create a RuntimeError for timeout - 
children = ( - f' and interrupted any processing of {len(event.event_children)} child events' if event.event_children else '' - ) - handler_timeout_error = TimeoutError( - f'Event handler {get_handler_name(handler)}#{handler_id[-4:]}({event}) timed out after {event_result.timeout}s{children}' + except asyncio.CancelledError: + await self._on_event_result_change( + event, event_result, EventStatus.COMPLETED ) - event.event_result_update(handler=handler, eventbus=self, error=handler_timeout_error) - event.event_cancel_pending_child_processing(handler_timeout_error) - - from bubus.logging import log_timeout_tree - - log_timeout_tree(event, event_result) - # import ipdb; ipdb.set_trace() - raise handler_timeout_error from e - except Exception as e: - # Cancel the monitor task on error too - monitor_task.cancel() - - # Record error - event.event_result_update(handler=handler, eventbus=self, error=e) - - red = '\033[91m' - reset = '\033[0m' - logger.error( - f'❌ {self} Error in event handler {get_handler_name(handler)}({event}) -> \n{red}{type(e).__name__}({e}){reset}\n{_log_filtered_traceback(e)}', + raise + except Exception: + await self._on_event_result_change( + event, event_result, EventStatus.COMPLETED ) raise - finally: - # Reset context - _current_event_context.reset(token) - inside_handler_context.reset(handler_token) - _current_handler_id_context.reset(handler_id_token) - - # Ensure handler task is cancelled if it's still running - if handler_task and not handler_task.done(): - handler_task.cancel() - try: - await asyncio.wait_for(handler_task, timeout=0.1) - except (asyncio.CancelledError, TimeoutError): - pass # Expected when we cancel the task - - # Ensure monitor task is cancelled - try: - if not monitor_task.done(): - monitor_task.cancel() - await monitor_task - except asyncio.CancelledError: - pass # Expected when we cancel the monitor - except Exception as e: - # logger.debug(f"❌ {self} Handler monitor task cleanup error for 
{get_handler_name(handler)}#{str(id(handler))[-4:]}({event}): {type(e).__name__}: {e}") - pass - def _would_create_loop(self, event: 'BaseEvent[Any]', handler: EventHandler) -> bool: + def _would_create_loop(self, event: BaseEvent[Any], handler: EventHandler) -> bool: """Check if calling this handler would create a loop""" assert inspect.isfunction(handler) or inspect.iscoroutinefunction(handler) or inspect.ismethod(handler), ( @@ -1287,7 +1816,7 @@ def _would_create_loop(self, event: 'BaseEvent[Any]', handler: EventHandler) -> return False def _handler_dispatched_ancestor( - self, event: 'BaseEvent[Any]', handler_id: str, visited: set[str] | None = None, depth: int = 0 + self, event: BaseEvent[Any], handler_id: str, visited: set[str] | None = None, depth: int = 0 ) -> int: """Check how many times this handler appears in the ancestry chain. Returns the depth count.""" # Prevent infinite recursion in case of circular parent references @@ -1322,27 +1851,6 @@ def _handler_dispatched_ancestor( # Recursively check the parent's ancestry return self._handler_dispatched_ancestor(parent_event, handler_id, visited, depth) - async def _default_log_handler(self, event: 'BaseEvent[Any]') -> None: - """Default handler that logs all events""" - # logger.debug( - # f'βœ… {self} completed: {event} -> {list(event.event_results.values()) or ''}' - # ) - pass - - async def _default_wal_handler(self, event: 'BaseEvent[Any]') -> None: - """Persist completed event to WAL file as JSONL""" - - if not self.wal_path: - return None - - try: - event_json = event.model_dump_json() # pyright: ignore[reportUnknownMemberType] - self.wal_path.parent.mkdir(parents=True, exist_ok=True) - async with await anyio.open_file(self.wal_path, 'a', encoding='utf-8') as f: # pyright: ignore[reportUnknownMemberType] - await f.write(event_json + '\n') # pyright: ignore[reportUnknownMemberType] - except Exception as e: - logger.error(f'❌ {self} Failed to save event {event.event_id} to WAL file: {type(e).__name__} 
{e}\n{event}') - def cleanup_excess_events(self) -> int: """ Clean up excess events from event_history based on max_history_size. @@ -1364,7 +1872,8 @@ def cleanup_excess_events(self) -> int: del self.event_history[event_id] if event_ids_to_remove: - logger.debug(f'🧹 {self} Cleaned up {len(event_ids_to_remove)} excess events from history') + if logger.isEnabledFor(logging.DEBUG): + logger.debug('🧹 %s Cleaned up %d excess events from history', self, len(event_ids_to_remove)) return len(event_ids_to_remove) @@ -1380,17 +1889,17 @@ def cleanup_event_history(self) -> int: return 0 # Separate events by status - pending_events: list[tuple[str, 'BaseEvent[Any]']] = [] - started_events: list[tuple[str, 'BaseEvent[Any]']] = [] - completed_events: list[tuple[str, 'BaseEvent[Any]']] = [] + pending_events: list[tuple[str, BaseEvent[Any]]] = [] + started_events: list[tuple[str, BaseEvent[Any]]] = [] + completed_events: list[tuple[str, BaseEvent[Any]]] = [] for event_id, event in self.event_history.items(): - if event.event_status == 'pending': - pending_events.append((event_id, event)) - elif event.event_status == 'started': - started_events.append((event_id, event)) - else: # completed or error + if self._is_event_complete_fast(event): completed_events.append((event_id, event)) + elif self._is_event_started_fast(event): + started_events.append((event_id, event)) + else: + pending_events.append((event_id, event)) # Sort completed events by creation time (oldest first) completed_events.sort(key=lambda x: x[1].event_created_at.timestamp()) # pyright: ignore[reportUnknownMemberType, reportUnknownLambdaType] diff --git a/pyproject.toml b/pyproject.toml index 132c3bc..1ed8f9f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ name = "bubus" description = "Advanced Pydantic-powered event bus with async support" authors = [{ name = "Nick Sweeting" }] -version = "1.5.6" +version = "1.7.3" readme = "README.md" requires-python = ">=3.11,<4.0" classifiers = [ diff --git 
a/tests/test_comprehensive_patterns.py b/tests/test_comprehensive_patterns.py index b98b211..e8a5784 100644 --- a/tests/test_comprehensive_patterns.py +++ b/tests/test_comprehensive_patterns.py @@ -1,5 +1,7 @@ """Test comprehensive event patterns including forwarding, async/sync dispatch, and parent-child tracking.""" +# pyright: reportUnusedVariable=false + import asyncio from typing import Any @@ -79,9 +81,13 @@ async def parent_bus1_handler(event: ParentEvent) -> str: print(' Handlers that processed this event:') for result in child_event_sync.event_results.values(): print(f' - {result.handler_name} (bus: {result.eventbus_name})') - # The event was processed by bus1 using bus2.dispatch handler + # The event was forwarded from bus1 and processed by bus2. + assert any( + result.eventbus_name == 'bus1' and 'dispatch' in result.handler_name + for result in child_event_sync.event_results.values() + ) assert any( - 'bus2' in result.handler_name and 'dispatch' in result.handler_name + result.eventbus_name == 'bus2' and 'child_bus2_event_handler' in result.handler_name for result in child_event_sync.event_results.values() ) print(' Event was successfully forwarded to bus2') @@ -110,6 +116,9 @@ async def parent_bus1_handler(event: ParentEvent) -> str: await bus1.wait_until_idle() await bus2.wait_until_idle() + # This is a happy-path test: no handler should have errored. + assert all(result.error is None for result in parent_event.event_results.values()), parent_event.event_results + # Verify all child events have correct parent print('\n5. 
Verifying all events have correct parent...') all_events = list(bus1.event_history.values()) @@ -120,8 +129,8 @@ async def parent_bus1_handler(event: ParentEvent) -> str: ) # Child events should have parent's ID - child_events = [e for e in all_events if isinstance(e, (ImmediateChildEvent, QueuedChildEvent))] - assert all(event.event_parent_id == parent_event.event_id for event in child_events) + event_children = [e for e in all_events if isinstance(e, (ImmediateChildEvent, QueuedChildEvent))] + assert all(event.event_parent_id == parent_event.event_id for event in event_children) # Sort results by sequence number to see actual execution order sorted_results = sorted(results, key=lambda x: x[0]) @@ -173,6 +182,47 @@ async def parent_bus1_handler(event: ParentEvent) -> str: await bus2.stop(clear=True) +async def test_await_forwarded_event_waits_for_target_bus_handlers(): + """ + Awaiting a dispatched event on source bus must wait for forwarded target-bus + handlers too, not only the source forwarding handler. 
+ """ + bus_src = EventBus(name='ForwardWaitSrc') + bus_dst = EventBus(name='ForwardWaitDst') + + class ForwardedEvent(BaseEvent[str]): + pass + + target_started = asyncio.Event() + target_finished = asyncio.Event() + + async def target_handler(event: ForwardedEvent) -> str: + target_started.set() + await asyncio.sleep(0.05) + target_finished.set() + return 'target_done' + + bus_src.on('*', bus_dst.dispatch) + bus_dst.on(ForwardedEvent, target_handler) + + try: + t0 = asyncio.get_running_loop().time() + event = await bus_src.dispatch(ForwardedEvent()) + elapsed = asyncio.get_running_loop().time() - t0 + + assert target_started.is_set() + assert target_finished.is_set() + assert elapsed >= 0.04 + assert any( + result.eventbus_name == 'ForwardWaitDst' and result.handler_name.endswith('target_handler') + for result in event.event_results.values() + ), event.event_results + assert all(result.status in ('completed', 'error') for result in event.event_results.values()) + finally: + await bus_src.stop(clear=True) + await bus_dst.stop(clear=True) + + async def test_race_condition_stress(): """Stress test to ensure no race conditions.""" print('\n=== Test Race Condition Stress ===') @@ -244,10 +294,676 @@ def bad_handler(bad: BaseEvent[Any]) -> None: await bus2.stop(clear=True) +async def test_awaited_child_jumps_queue_no_overshoot(): + """ + Test the edge case in BaseEvent.__await__() (models.py): + - When a handler dispatches and awaits a child event, that child should + execute immediately (jumping the FIFO queue) + - Other queued events (Event2, Event3) should NOT be processed (no overshoot) + - FIFO order should be maintained for remaining events after completion + """ + print('\n=== Test Awaited Child Jumps Queue (No Overshoot) ===') + + bus = EventBus(name='TestBus', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class Event3(BaseEvent[str]): + pass + + class 
ChildEvent(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Event1_start') + # Dispatch and await child - this should jump the queue + child = bus.dispatch(ChildEvent()) + execution_order.append('Child_dispatched') + await child + execution_order.append('Child_await_returned') + execution_order.append('Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + async def event3_handler(event: Event3) -> str: + execution_order.append('Event3_start') + execution_order.append('Event3_end') + return 'event3_done' + + async def child_handler(event: ChildEvent) -> str: + execution_order.append('Child_start') + execution_order.append('Child_end') + return 'child_done' + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(Event3, event3_handler) + bus.on(ChildEvent, child_handler) + + try: + # Dispatch all three events (they go into the queue) + event1 = bus.dispatch(Event1()) + event2 = bus.dispatch(Event2()) + event3 = bus.dispatch(Event3()) + + # Verify events are queued + await asyncio.sleep(0) # Let dispatch settle + print(f'After dispatch: E1={event1.event_status}, E2={event2.event_status}, E3={event3.event_status}') + + # Await Event1 - this triggers processing and the child should jump queue + await event1 + + print(f'After await event1: {execution_order}') + print(f'Statuses: E1={event1.event_status}, E2={event2.event_status}, E3={event3.event_status}') + + # KEY ASSERTION 1: Child executed during Event1's handler (jumped queue) + assert 'Child_start' in execution_order, 'Child should have executed' + assert 'Child_end' in execution_order, 'Child should have completed' + child_start_idx = execution_order.index('Child_start') + child_end_idx = execution_order.index('Child_end') + event1_end_idx = execution_order.index('Event1_end') + assert child_start_idx < 
event1_end_idx, 'Child should execute before Event1 ends' + assert child_end_idx < event1_end_idx, 'Child should complete before Event1 ends' + + # KEY ASSERTION 2: Event2 and Event3 did NOT execute yet (no overshoot) + assert 'Event2_start' not in execution_order, \ + f'Event2 should NOT have started (no overshoot). Order: {execution_order}' + assert 'Event3_start' not in execution_order, \ + f'Event3 should NOT have started (no overshoot). Order: {execution_order}' + + # KEY ASSERTION 3: Event2 and Event3 are still pending + assert event2.event_status == 'pending', \ + f'Event2 should be pending, got {event2.event_status}' + assert event3.event_status == 'pending', \ + f'Event3 should be pending, got {event3.event_status}' + + # Now let the remaining events process + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + + # KEY ASSERTION 4: FIFO order maintained - Event2 before Event3 + event2_start_idx = execution_order.index('Event2_start') + event3_start_idx = execution_order.index('Event3_start') + assert event2_start_idx < event3_start_idx, 'FIFO: Event2 should start before Event3' + + # Verify all completed + assert event2.event_status == 'completed' + assert event3.event_status == 'completed' + + # KEY ASSERTION 5: event_history reflects dispatch order, but started_at/completed_at + # timestamps reflect actual execution order (post-reordering) + history_list = list(bus.event_history.values()) + history_types = [e.__class__.__name__ for e in history_list] + print(f'Event history (dispatch order): {history_types}') + + # Find the child event and E2/E3 + child_event = next(e for e in history_list if isinstance(e, ChildEvent)) + event2_from_history = next(e for e in history_list if isinstance(e, Event2)) + event3_from_history = next(e for e in history_list if isinstance(e, Event3)) + + # Verify execution order via timestamps: Child should have started before E2 and E3 + assert child_event.event_started_at is not None, 'Child 
should have started_at timestamp' + assert event2_from_history.event_started_at is not None, 'Event2 should have started_at timestamp' + assert event3_from_history.event_started_at is not None, 'Event3 should have started_at timestamp' + + assert child_event.event_started_at < event2_from_history.event_started_at, \ + f'Child should have started before Event2. Child: {child_event.event_started_at}, E2: {event2_from_history.event_started_at}' + assert child_event.event_started_at < event3_from_history.event_started_at, \ + f'Child should have started before Event3. Child: {child_event.event_started_at}, E3: {event3_from_history.event_started_at}' + + print(f'Child started_at: {child_event.event_started_at}') + print(f'Event2 started_at: {event2_from_history.event_started_at}') + print(f'Event3 started_at: {event3_from_history.event_started_at}') + + print('βœ… Awaited child jumps queue, no overshoot, FIFO maintained!') + + finally: + await bus.stop(clear=True) + + +async def test_dispatch_multiple_await_one_skips_others(): + """ + Test that when a handler dispatches multiple events and awaits only one, + the awaited event jumps the queue while the non-awaited ones stay in place. 
+ + Scenario: + - Queue: [E1, E2, E3] + - E1 handler dispatches ChildA, ChildB, ChildC (queue becomes [E2, E3, ChildA, ChildB, ChildC]) + - E1 handler awaits only ChildB + - ChildB should jump to front and execute immediately + - ChildA and ChildC should NOT execute (they stay behind E2, E3 in queue) + - E2 and E3 should NOT execute during E1's handler + """ + print('\n=== Test Dispatch Multiple, Await One ===') + + bus = EventBus(name='MultiDispatchBus', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class Event3(BaseEvent[str]): + pass + + class ChildA(BaseEvent[str]): + pass + + class ChildB(BaseEvent[str]): + pass + + class ChildC(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Event1_start') + + # Dispatch three children but only await the middle one + child_a = bus.dispatch(ChildA()) + execution_order.append('ChildA_dispatched') + + child_b = bus.dispatch(ChildB()) + execution_order.append('ChildB_dispatched') + + child_c = bus.dispatch(ChildC()) + execution_order.append('ChildC_dispatched') + + # Only await ChildB - it should jump the queue + await child_b + execution_order.append('ChildB_await_returned') + + execution_order.append('Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + async def event3_handler(event: Event3) -> str: + execution_order.append('Event3_start') + execution_order.append('Event3_end') + return 'event3_done' + + async def child_a_handler(event: ChildA) -> str: + execution_order.append('ChildA_start') + execution_order.append('ChildA_end') + return 'child_a_done' + + async def child_b_handler(event: ChildB) -> str: + execution_order.append('ChildB_start') + execution_order.append('ChildB_end') + return 'child_b_done' + + async def 
child_c_handler(event: ChildC) -> str: + execution_order.append('ChildC_start') + execution_order.append('ChildC_end') + return 'child_c_done' + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(Event3, event3_handler) + bus.on(ChildA, child_a_handler) + bus.on(ChildB, child_b_handler) + bus.on(ChildC, child_c_handler) + + try: + # Dispatch E1, E2, E3 + event1 = bus.dispatch(Event1()) + event2 = bus.dispatch(Event2()) + event3 = bus.dispatch(Event3()) + + # Await E1 + await event1 + + print(f'After await event1: {execution_order}') + + # ChildB should have executed (it was awaited) + assert 'ChildB_start' in execution_order, 'ChildB should have executed' + assert 'ChildB_end' in execution_order, 'ChildB should have completed' + + # ChildB should have executed before Event1 ended (queue jump worked) + child_b_end_idx = execution_order.index('ChildB_end') + event1_end_idx = execution_order.index('Event1_end') + assert child_b_end_idx < event1_end_idx, 'ChildB should complete before Event1 ends' + + # ChildA and ChildC should NOT have executed BEFORE Event1 ended (no overshoot) + # They may have executed after Event1 completed (via background task), which is fine + if 'ChildA_start' in execution_order: + child_a_start_idx = execution_order.index('ChildA_start') + assert child_a_start_idx > event1_end_idx, \ + f'ChildA should NOT start before Event1 ends. Order: {execution_order}' + if 'ChildC_start' in execution_order: + child_c_start_idx = execution_order.index('ChildC_start') + assert child_c_start_idx > event1_end_idx, \ + f'ChildC should NOT start before Event1 ends. Order: {execution_order}' + + # E2 and E3 should NOT have executed BEFORE Event1 ended (no overshoot) + if 'Event2_start' in execution_order: + event2_start_idx = execution_order.index('Event2_start') + assert event2_start_idx > event1_end_idx, \ + f'Event2 should NOT start before Event1 ends. 
Order: {execution_order}' + if 'Event3_start' in execution_order: + event3_start_idx = execution_order.index('Event3_start') + assert event3_start_idx > event1_end_idx, \ + f'Event3 should NOT start before Event1 ends. Order: {execution_order}' + + # Now process remaining events + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + + # Verify FIFO order for remaining: E2, E3, ChildA, ChildC + # (ChildA and ChildC were dispatched after E2/E3 were already queued) + event2_start_idx = execution_order.index('Event2_start') + event3_start_idx = execution_order.index('Event3_start') + child_a_start_idx = execution_order.index('ChildA_start') + child_c_start_idx = execution_order.index('ChildC_start') + + assert event2_start_idx < event3_start_idx, 'FIFO: E2 before E3' + assert event3_start_idx < child_a_start_idx, 'FIFO: E3 before ChildA' + assert child_a_start_idx < child_c_start_idx, 'FIFO: ChildA before ChildC' + + print('βœ… Dispatch multiple, await one works correctly!') + + finally: + await bus.stop(clear=True) + + +async def test_multi_bus_forwarding_with_queued_events(): + """ + Test queue jumping with multiple buses that have forwarding set up, + where both buses already have events queued. 
+ + Scenario: + - Bus1 has [E1, E2] queued + - Bus2 has [E3, E4] queued + - E1's handler dispatches Child to Bus1 and awaits it + - Child should jump Bus1's queue (ahead of E2) + - E3, E4 on Bus2 should NOT be affected + """ + print('\n=== Test Multi-Bus Forwarding With Queued Events ===') + + bus1 = EventBus(name='Bus1', max_history_size=100) + bus2 = EventBus(name='Bus2', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class Event3(BaseEvent[str]): + pass + + class Event4(BaseEvent[str]): + pass + + class ChildEvent(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Bus1_Event1_start') + # Dispatch child to bus1 and await + child = bus1.dispatch(ChildEvent()) + execution_order.append('Child_dispatched_to_Bus1') + await child + execution_order.append('Child_await_returned') + execution_order.append('Bus1_Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Bus1_Event2_start') + execution_order.append('Bus1_Event2_end') + return 'event2_done' + + async def event3_handler(event: Event3) -> str: + execution_order.append('Bus2_Event3_start') + execution_order.append('Bus2_Event3_end') + return 'event3_done' + + async def event4_handler(event: Event4) -> str: + execution_order.append('Bus2_Event4_start') + execution_order.append('Bus2_Event4_end') + return 'event4_done' + + async def child_handler(event: ChildEvent) -> str: + execution_order.append('Child_start') + execution_order.append('Child_end') + return 'child_done' + + # Register handlers on respective buses + bus1.on(Event1, event1_handler) + bus1.on(Event2, event2_handler) + bus1.on(ChildEvent, child_handler) + + bus2.on(Event3, event3_handler) + bus2.on(Event4, event4_handler) + + try: + # Queue events on both buses + event1 = bus1.dispatch(Event1()) + event2 = bus1.dispatch(Event2()) + event3 = 
bus2.dispatch(Event3()) + event4 = bus2.dispatch(Event4()) + + await asyncio.sleep(0) # Let dispatch settle + + print(f'Bus1 queue size: {bus1.event_queue.qsize() if bus1.event_queue else 0}') + print(f'Bus2 queue size: {bus2.event_queue.qsize() if bus2.event_queue else 0}') + + # Await E1 - child should jump Bus1's queue + await event1 + + print(f'After await event1: {execution_order}') + + # Child should have executed + assert 'Child_start' in execution_order, 'Child should have executed' + assert 'Child_end' in execution_order, 'Child should have completed' + + # Child should have executed before Event1 ended + child_end_idx = execution_order.index('Child_end') + event1_end_idx = execution_order.index('Bus1_Event1_end') + assert child_end_idx < event1_end_idx, 'Child should complete before Event1 ends' + + # E2 on Bus1 should NOT have executed yet + assert 'Bus1_Event2_start' not in execution_order, \ + f'E2 on Bus1 should NOT have started. Order: {execution_order}' + + # E3 and E4 on Bus2 should NOT have executed yet + assert 'Bus2_Event3_start' not in execution_order, \ + f'E3 on Bus2 should NOT have started. Order: {execution_order}' + assert 'Bus2_Event4_start' not in execution_order, \ + f'E4 on Bus2 should NOT have started. Order: {execution_order}' + + # Now process remaining events on both buses + await bus1.wait_until_idle() + await bus2.wait_until_idle() + + print(f'Final execution order: {execution_order}') + + # Verify all events eventually executed + assert 'Bus1_Event2_start' in execution_order + assert 'Bus2_Event3_start' in execution_order + assert 'Bus2_Event4_start' in execution_order + + print('βœ… Multi-bus forwarding with queued events works correctly!') + + finally: + await bus1.stop(clear=True) + await bus2.stop(clear=True) + + +async def test_await_already_completed_event(): + """ + Test that awaiting an event that's already completed is a no-op. + The event isn't in the queue anymore, so there's nothing to reorder. 
+ """ + print('\n=== Test Await Already Completed Event ===') + + bus = EventBus(name='AlreadyCompletedBus', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Event1_start') + execution_order.append('Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + + try: + # Dispatch and await E1 first + event1 = await bus.dispatch(Event1()) + assert event1.event_status == 'completed' + + # Now dispatch E2 + event2 = bus.dispatch(Event2()) + + # Await E1 again - should be a no-op since it's already completed + await event1 # Should return immediately + + print(f'After second await event1: {execution_order}') + + # E2 should NOT have executed yet (we didn't trigger processing) + # The second await on completed E1 should just return without processing queue + assert event2.event_status == 'pending', \ + f'E2 should still be pending, got {event2.event_status}' + + # Complete E2 + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + print('βœ… Await already completed event works correctly!') + + finally: + await bus.stop(clear=True) + + +async def test_multiple_awaits_same_event(): + """ + Test that multiple concurrent awaits on the same event work correctly. + Only the first await should trigger queue reordering; subsequent awaits + should just wait on the completion signal. 
+ """ + print('\n=== Test Multiple Awaits Same Event ===') + + bus = EventBus(name='MultiAwaitBus', max_history_size=100) + execution_order: list[str] = [] + await_results: list[str] = [] + child_ref: BaseEvent[str] | None = None + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class ChildEvent(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + nonlocal child_ref + execution_order.append('Event1_start') + + # Dispatch child + child = bus.dispatch(ChildEvent()) + child_ref = child + + # Create multiple concurrent awaits on the same child + async def await_child(name: str): + await child + await_results.append(f'{name}_completed') + + # Start two concurrent awaits + task1 = asyncio.create_task(await_child('await1')) + task2 = asyncio.create_task(await_child('await2')) + + # Wait for both + await asyncio.gather(task1, task2) + execution_order.append('Both_awaits_completed') + + execution_order.append('Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + async def child_handler(event: ChildEvent) -> str: + execution_order.append('Child_start') + await asyncio.sleep(0.01) # Small delay to ensure both awaits are waiting + execution_order.append('Child_end') + return 'child_done' + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(ChildEvent, child_handler) + + try: + event1 = bus.dispatch(Event1()) + event2 = bus.dispatch(Event2()) + + await event1 + + print(f'After await event1: {execution_order}') + print(f'Await results: {await_results}') + + # Both awaits should have completed + assert len(await_results) == 2, f'Both awaits should complete, got {await_results}' + assert 'await1_completed' in await_results + assert 'await2_completed' in await_results + + # Child should have executed exactly once and before Event1 ended + assert 
execution_order.count('Child_start') == 1 + assert execution_order.count('Child_end') == 1 + assert 'Child_start' in execution_order + assert 'Child_end' in execution_order + child_end_idx = execution_order.index('Child_end') + event1_end_idx = execution_order.index('Event1_end') + assert child_end_idx < event1_end_idx + + # Child event should have exactly one handler result (no double-run). + assert child_ref is not None + assert len(child_ref.event_results) == 1 + + # E2 should NOT have executed yet + assert 'Event2_start' not in execution_order, \ + f'E2 should NOT have started. Order: {execution_order}' + + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + print('βœ… Multiple awaits same event works correctly!') + + finally: + await bus.stop(clear=True) + + +async def test_deeply_nested_awaited_children(): + """ + Test deeply nested awaited children: Event1 awaits Child1, which awaits Child2. + All should complete before Event2 starts (no overshoot at any level). 
+ """ + print('\n=== Test Deeply Nested Awaited Children ===') + + bus = EventBus(name='DeepNestedBus', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class Child1(BaseEvent[str]): + pass + + class Child2(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Event1_start') + child1 = bus.dispatch(Child1()) + await child1 + execution_order.append('Event1_end') + return 'event1_done' + + async def child1_handler(event: Child1) -> str: + execution_order.append('Child1_start') + child2 = bus.dispatch(Child2()) + await child2 + execution_order.append('Child1_end') + return 'child1_done' + + async def child2_handler(event: Child2) -> str: + execution_order.append('Child2_start') + execution_order.append('Child2_end') + return 'child2_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + bus.on(Event1, event1_handler) + bus.on(Child1, child1_handler) + bus.on(Child2, child2_handler) + bus.on(Event2, event2_handler) + + try: + event1 = bus.dispatch(Event1()) + event2 = bus.dispatch(Event2()) + + await event1 + + print(f'After await event1: {execution_order}') + + # All nested children should have completed + assert 'Child1_start' in execution_order + assert 'Child1_end' in execution_order + assert 'Child2_start' in execution_order + assert 'Child2_end' in execution_order + + # Verify nesting order: Child2 completes before Child1 + child2_end_idx = execution_order.index('Child2_end') + child1_end_idx = execution_order.index('Child1_end') + event1_end_idx = execution_order.index('Event1_end') + assert child2_end_idx < child1_end_idx < event1_end_idx + + # E2 should NOT have started + assert 'Event2_start' not in execution_order, \ + f'E2 should NOT have started. 
Order: {execution_order}' + + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + + # E2 should start after E1 ends + event2_start_idx = execution_order.index('Event2_start') + assert event2_start_idx > event1_end_idx + + print('βœ… Deeply nested awaited children works correctly!') + + finally: + await bus.stop(clear=True) + + async def main(): """Run all tests.""" await test_comprehensive_patterns() await test_race_condition_stress() + await test_awaited_child_jumps_queue_no_overshoot() + await test_dispatch_multiple_await_one_skips_others() + await test_multi_bus_forwarding_with_queued_events() + await test_await_already_completed_event() + await test_multiple_awaits_same_event() + await test_deeply_nested_awaited_children() if __name__ == '__main__': diff --git a/tests/test_context_propagation.py b/tests/test_context_propagation.py new file mode 100644 index 0000000..dd597e6 --- /dev/null +++ b/tests/test_context_propagation.py @@ -0,0 +1,422 @@ +""" +Tests for ContextVar propagation through event dispatch and handler execution. + +This addresses GitHub issue #20: ContextVar values set before dispatch should +be accessible inside event handlers. + +The key insight is that context must be captured at DISPATCH time (when the +user calls bus.dispatch()), not at PROCESSING time (when the event is pulled +from the queue and handlers are executed). 
+""" + +# pyright: reportUnusedVariable=false +# pyright: reportUnusedFunction=false + +import asyncio +from contextvars import ContextVar +from typing import Any + +import pytest + +from bubus import BaseEvent, EventBus + + +# Test context variables (simulating user-defined context like request_id) +request_id_var: ContextVar[str] = ContextVar('request_id', default='') +user_id_var: ContextVar[str] = ContextVar('user_id', default='') +trace_id_var: ContextVar[str] = ContextVar('trace_id', default='') + + +class SimpleEvent(BaseEvent[str]): + """Simple event for context propagation tests.""" + pass + + +class ChildEvent(BaseEvent[str]): + """Child event for nested context tests.""" + pass + + +class TestContextPropagation: + """Test that ContextVar values propagate from dispatch site to handlers.""" + + async def test_contextvar_propagates_to_handler(self): + """ + Basic test: ContextVar set before dispatch should be accessible in handler. + + This is the core issue from GitHub #20. + """ + bus = EventBus(name='ContextTestBus') + captured_values: dict[str, str] = {} + + async def handler(event: SimpleEvent) -> str: + # These should have the values set BEFORE dispatch, not defaults + captured_values['request_id'] = request_id_var.get() + captured_values['user_id'] = user_id_var.get() + return 'handled' + + bus.on(SimpleEvent, handler) + + try: + # Set context values (simulating FastAPI request context) + request_id_var.set('req-12345') + user_id_var.set('user-abc') + + # Dispatch and await + event = await bus.dispatch(SimpleEvent()) + + # Handler should have seen the context values + assert captured_values['request_id'] == 'req-12345', \ + f"Expected 'req-12345', got '{captured_values['request_id']}'" + assert captured_values['user_id'] == 'user-abc', \ + f"Expected 'user-abc', got '{captured_values['user_id']}'" + + finally: + await bus.stop(clear=True) + + async def test_contextvar_propagates_through_nested_handlers(self): + """ + Nested dispatch: Context should 
propagate through parent -> child handlers. + + When a handler dispatches and awaits a child event, the child handler + should also have access to the original context. + """ + bus = EventBus(name='NestedContextBus') + captured_parent: dict[str, str] = {} + captured_child: dict[str, str] = {} + + async def parent_handler(event: SimpleEvent) -> str: + captured_parent['request_id'] = request_id_var.get() + captured_parent['trace_id'] = trace_id_var.get() + + # Dispatch child event + child = await bus.dispatch(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + captured_child['request_id'] = request_id_var.get() + captured_child['trace_id'] = trace_id_var.get() + return 'child_done' + + bus.on(SimpleEvent, parent_handler) + bus.on(ChildEvent, child_handler) + + try: + # Set context + request_id_var.set('req-nested-123') + trace_id_var.set('trace-xyz') + + await bus.dispatch(SimpleEvent()) + + # Both handlers should see the context + assert captured_parent['request_id'] == 'req-nested-123' + assert captured_parent['trace_id'] == 'trace-xyz' + assert captured_child['request_id'] == 'req-nested-123' + assert captured_child['trace_id'] == 'trace-xyz' + + finally: + await bus.stop(clear=True) + + async def test_context_isolation_between_dispatches(self): + """ + Different dispatches should have isolated contexts. + + If dispatch A sets request_id='A' and dispatch B sets request_id='B', + handler A should see 'A' and handler B should see 'B'. 
+ """ + bus = EventBus(name='IsolationTestBus') + captured_values: list[str] = [] + + async def handler(event: SimpleEvent) -> str: + # Small delay to ensure both handlers run + await asyncio.sleep(0.01) + captured_values.append(request_id_var.get()) + return 'handled' + + bus.on(SimpleEvent, handler) + + try: + # Dispatch two events with different contexts + async def dispatch_with_context(req_id: str): + request_id_var.set(req_id) + await bus.dispatch(SimpleEvent()) + + # Run both dispatches + request_id_var.set('req-A') + event_a = bus.dispatch(SimpleEvent()) + + request_id_var.set('req-B') + event_b = bus.dispatch(SimpleEvent()) + + await event_a + await event_b + + # Each handler should have seen its own context + # Note: order might vary, so just check both values are present + assert 'req-A' in captured_values, f"Expected 'req-A' in {captured_values}" + assert 'req-B' in captured_values, f"Expected 'req-B' in {captured_values}" + + finally: + await bus.stop(clear=True) + + async def test_context_propagates_to_parallel_handlers(self): + """ + When parallel_handlers=True, all handlers should see the dispatch context. 
+ """ + bus = EventBus(name='ParallelContextBus', parallel_handlers=True) + captured_values: list[str] = [] + lock = asyncio.Lock() + + async def handler1(event: SimpleEvent) -> str: + async with lock: + captured_values.append(f'h1:{request_id_var.get()}') + return 'h1_done' + + async def handler2(event: SimpleEvent) -> str: + async with lock: + captured_values.append(f'h2:{request_id_var.get()}') + return 'h2_done' + + bus.on(SimpleEvent, handler1) + bus.on(SimpleEvent, handler2) + + try: + request_id_var.set('req-parallel') + await bus.dispatch(SimpleEvent()) + + assert 'h1:req-parallel' in captured_values, f"Handler1 didn't see context: {captured_values}" + assert 'h2:req-parallel' in captured_values, f"Handler2 didn't see context: {captured_values}" + + finally: + await bus.stop(clear=True) + + async def test_context_propagates_through_event_forwarding(self): + """ + When events are forwarded between buses, context should propagate. + """ + bus1 = EventBus(name='Bus1') + bus2 = EventBus(name='Bus2') + captured_bus1: dict[str, str] = {} + captured_bus2: dict[str, str] = {} + + async def bus1_handler(event: SimpleEvent) -> str: + captured_bus1['request_id'] = request_id_var.get() + return 'bus1_done' + + async def bus2_handler(event: SimpleEvent) -> str: + captured_bus2['request_id'] = request_id_var.get() + return 'bus2_done' + + bus1.on(SimpleEvent, bus1_handler) + bus1.on('*', bus2.dispatch) # Forward all events to bus2 + bus2.on(SimpleEvent, bus2_handler) + + try: + request_id_var.set('req-forwarded') + await bus1.dispatch(SimpleEvent()) + await bus2.wait_until_idle() + + assert captured_bus1['request_id'] == 'req-forwarded', \ + f"Bus1 handler didn't see context: {captured_bus1}" + assert captured_bus2['request_id'] == 'req-forwarded', \ + f"Bus2 handler didn't see context: {captured_bus2}" + + finally: + await bus1.stop(clear=True) + await bus2.stop(clear=True) + + async def test_handler_can_modify_context_without_affecting_parent(self): + """ + Handler 
modifications to ContextVar should not affect the parent context. + + This ensures context is properly copied, not shared. + """ + bus = EventBus(name='ModifyContextBus') + parent_value_after_child: str = '' + + async def parent_handler(event: SimpleEvent) -> str: + nonlocal parent_value_after_child + # Set a value in parent + request_id_var.set('parent-value') + + # Dispatch child which will modify the context + await bus.dispatch(ChildEvent()) + + # Parent's context should be unchanged + parent_value_after_child = request_id_var.get() + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + # Modify context in child + request_id_var.set('child-modified') + return 'child_done' + + bus.on(SimpleEvent, parent_handler) + bus.on(ChildEvent, child_handler) + + try: + await bus.dispatch(SimpleEvent()) + + # Parent should still see its own value, not child's modification + assert parent_value_after_child == 'parent-value', \ + f"Parent context was modified by child: got '{parent_value_after_child}'" + + finally: + await bus.stop(clear=True) + + async def test_event_parent_id_tracking_still_works(self): + """ + Critical: Internal context vars (event_parent_id tracking) must still work + when we propagate dispatch-time context. + + This ensures our context merging doesn't break the bubus internals. 
+ """ + bus = EventBus(name='ParentIdTrackingBus') + parent_event_id: str | None = None + child_event_parent_id: str | None = None + + async def parent_handler(event: SimpleEvent) -> str: + nonlocal parent_event_id + parent_event_id = event.event_id + + # Child event should automatically get parent_id set + child = await bus.dispatch(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + nonlocal child_event_parent_id + child_event_parent_id = event.event_parent_id + return 'child_done' + + bus.on(SimpleEvent, parent_handler) + bus.on(ChildEvent, child_handler) + + try: + # Set user context (to ensure we're testing the merge scenario) + request_id_var.set('req-parent-tracking') + + await bus.dispatch(SimpleEvent()) + + # Verify parent ID tracking works + assert parent_event_id is not None, "Parent event ID was not captured" + assert child_event_parent_id is not None, "Child event parent ID was not set" + assert child_event_parent_id == parent_event_id, \ + f"Child's parent_id ({child_event_parent_id}) doesn't match parent's id ({parent_event_id})" + + finally: + await bus.stop(clear=True) + + async def test_dispatch_context_and_parent_id_both_work(self): + """ + Both user-defined ContextVars AND internal event tracking must work together. + + This is the key test for context stacking/merging. 
+ """ + bus = EventBus(name='CombinedContextBus') + results: dict[str, Any] = {} + + async def parent_handler(event: SimpleEvent) -> str: + results['parent_request_id'] = request_id_var.get() + results['parent_event_id'] = event.event_id + + # Dispatch child - should get both user context AND parent tracking + child = await bus.dispatch(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + results['child_request_id'] = request_id_var.get() + results['child_event_parent_id'] = event.event_parent_id + return 'child_done' + + bus.on(SimpleEvent, parent_handler) + bus.on(ChildEvent, child_handler) + + try: + # Set user context + request_id_var.set('req-combined-test') + + await bus.dispatch(SimpleEvent()) + + # User context should propagate + assert results['parent_request_id'] == 'req-combined-test', \ + f"Parent didn't see user context: {results['parent_request_id']}" + assert results['child_request_id'] == 'req-combined-test', \ + f"Child didn't see user context: {results['child_request_id']}" + + # Internal parent tracking should also work + assert results['child_event_parent_id'] == results['parent_event_id'], \ + f"Parent ID tracking broken: child.parent_id={results['child_event_parent_id']}, parent.id={results['parent_event_id']}" + + finally: + await bus.stop(clear=True) + + async def test_deeply_nested_context_and_parent_tracking(self): + """ + Test that both user context and parent tracking work through multiple levels. 
+ """ + bus = EventBus(name='DeepNestingBus') + results: list[dict[str, Any]] = [] + + class Level2Event(BaseEvent[str]): + pass + + class Level3Event(BaseEvent[str]): + pass + + async def level1_handler(event: SimpleEvent) -> str: + results.append({ + 'level': 1, + 'request_id': request_id_var.get(), + 'event_id': event.event_id, + 'parent_id': event.event_parent_id, + }) + await bus.dispatch(Level2Event()) + return 'level1_done' + + async def level2_handler(event: Level2Event) -> str: + results.append({ + 'level': 2, + 'request_id': request_id_var.get(), + 'event_id': event.event_id, + 'parent_id': event.event_parent_id, + }) + await bus.dispatch(Level3Event()) + return 'level2_done' + + async def level3_handler(event: Level3Event) -> str: + results.append({ + 'level': 3, + 'request_id': request_id_var.get(), + 'event_id': event.event_id, + 'parent_id': event.event_parent_id, + }) + return 'level3_done' + + bus.on(SimpleEvent, level1_handler) + bus.on(Level2Event, level2_handler) + bus.on(Level3Event, level3_handler) + + try: + request_id_var.set('req-deep-nesting') + + await bus.dispatch(SimpleEvent()) + + # All levels should see the user context + assert len(results) == 3, f"Expected 3 levels, got {len(results)}" + for r in results: + assert r['request_id'] == 'req-deep-nesting', \ + f"Level {r['level']} didn't see user context: {r['request_id']}" + + # Parent chain should be correct + assert results[0]['parent_id'] is None, "Level 1 should have no parent" + assert results[1]['parent_id'] == results[0]['event_id'], \ + f"Level 2 parent mismatch: {results[1]['parent_id']} != {results[0]['event_id']}" + assert results[2]['parent_id'] == results[1]['event_id'], \ + f"Level 3 parent mismatch: {results[2]['parent_id']} != {results[1]['event_id']}" + + finally: + await bus.stop(clear=True) + + +if __name__ == '__main__': + pytest.main([__file__, '-v', '-s']) diff --git a/tests/test_event_history_mirroring.py b/tests/test_event_history_mirroring.py new file mode 
100644 index 0000000..ac2bbef --- /dev/null +++ b/tests/test_event_history_mirroring.py @@ -0,0 +1,147 @@ +# pyright: basic +"""Tests for mirroring event history snapshots via middleware.""" + +from __future__ import annotations + +import asyncio +import multiprocessing +import sqlite3 +from pathlib import Path +from typing import Any, Sequence + +import pytest + +from bubus import BaseEvent, EventBus, SQLiteHistoryMirrorMiddleware + + +class HistoryTestEvent(BaseEvent): + """Event for verifying middleware mirroring behaviour.""" + + payload: str + should_fail: bool = False + + +def _summarize_history(history: dict[str, BaseEvent[Any]]) -> list[dict[str, Any]]: + """Collect comparable information about events stored in history.""" + summary: list[dict[str, Any]] = [] + for event in history.values(): + handler_results = [ + { + 'handler_name': result.handler_name.rsplit('.', 1)[-1], + 'status': result.status, + 'result': result.result, + 'error': repr(result.error) if result.error else None, + } + for result in sorted(event.event_results.values(), key=lambda r: r.handler_name) + ] + summary.append( + { + 'event_type': event.event_type, + 'event_status': event.event_status, + 'event_path_length': len(event.event_path), + 'children': sorted(child.event_type for child in event.event_children), + 'handler_results': handler_results, + } + ) + return sorted(summary, key=lambda record: record['event_type']) + + +async def _run_scenario( + *, + middlewares: Sequence[Any] = (), + should_fail: bool = False, +) -> list[dict[str, Any]]: + """Execute a simple scenario and return the history summary.""" + bus = EventBus(middlewares=list(middlewares)) + + async def ok_handler(event: HistoryTestEvent) -> str: + return f'ok-{event.payload}' + + async def conditional_handler(event: HistoryTestEvent) -> str: + if event.should_fail: + raise RuntimeError('boom') + return 'fine' + + bus.on('HistoryTestEvent', ok_handler) + bus.on('HistoryTestEvent', conditional_handler) + + try: + await 
bus.dispatch(HistoryTestEvent(payload='payload', should_fail=should_fail)) + await bus.wait_until_idle() + finally: + summary = _summarize_history(bus.event_history) + await bus.stop() + + return summary + + +@pytest.mark.asyncio +async def test_sqlite_mirror_matches_inmemory_success(tmp_path: Path) -> None: + db_path = tmp_path / 'events_success.sqlite' + in_memory_result = await _run_scenario() + sqlite_result = await _run_scenario(middlewares=[SQLiteHistoryMirrorMiddleware(db_path)]) + assert sqlite_result == in_memory_result + + conn = sqlite3.connect(db_path) + event_phases = conn.execute( + 'SELECT phase FROM events_log ORDER BY id' + ).fetchall() + conn.close() + assert {phase for (phase,) in event_phases} >= {'pending', 'started', 'completed'} + + +@pytest.mark.asyncio +async def test_sqlite_mirror_matches_inmemory_error(tmp_path: Path) -> None: + db_path = tmp_path / 'events_error.sqlite' + in_memory_result = await _run_scenario(should_fail=True) + sqlite_result = await _run_scenario( + middlewares=[SQLiteHistoryMirrorMiddleware(db_path)], + should_fail=True, + ) + assert sqlite_result == in_memory_result + + conn = sqlite3.connect(db_path) + phases = conn.execute('SELECT DISTINCT phase FROM events_log').fetchall() + conn.close() + assert {phase for (phase,) in phases} >= {'pending', 'started', 'completed'} + + +def _worker_dispatch(db_path: str, worker_id: int) -> None: + """Process entrypoint for exercising concurrent writes.""" + + async def run() -> None: + middleware = SQLiteHistoryMirrorMiddleware(Path(db_path)) + bus = EventBus(name=f'WorkerBus{worker_id}', middlewares=[middleware]) + + async def handler(event: HistoryTestEvent) -> str: + return f'worker-{worker_id}' + + bus.on('HistoryTestEvent', handler) + try: + await bus.dispatch(HistoryTestEvent(payload=f'worker-{worker_id}')) + await bus.wait_until_idle() + finally: + await bus.stop() + + asyncio.run(run()) + + +def test_sqlite_mirror_supports_concurrent_processes(tmp_path: Path) -> None: + 
db_path = tmp_path / 'shared_history.sqlite' + ctx = multiprocessing.get_context('spawn') + processes = [ctx.Process(target=_worker_dispatch, args=(str(db_path), idx)) for idx in range(3)] + for proc in processes: + proc.start() + for proc in processes: + proc.join(timeout=20) + assert proc.exitcode == 0 + + conn = sqlite3.connect(db_path) + events = conn.execute('SELECT DISTINCT eventbus_name FROM events_log').fetchall() + results_count = conn.execute('SELECT COUNT(*) FROM event_results_log').fetchone() + conn.close() + + assert {name for (name,) in events} == {'WorkerBus0', 'WorkerBus1', 'WorkerBus2'} + assert results_count is not None + # Each worker records pending/started/completed for its single handler + assert results_count[0] == 9 diff --git a/tests/test_event_result_standalone.py b/tests/test_event_result_standalone.py new file mode 100644 index 0000000..bf3a457 --- /dev/null +++ b/tests/test_event_result_standalone.py @@ -0,0 +1,92 @@ +from uuid import uuid4 + +import pytest + +from typing import Any, cast + +from bubus.models import BaseEvent, EventHandler, EventResult, get_handler_id +from bubus.service import EventBus + + +class _StubEvent: + """Minimal event-like object used to verify EventResult independence.""" + + def __init__(self): + self.event_id = 'stub-event' + self.event_children: list[BaseEvent | _StubEvent] = [] + self.event_result_type = str + self.event_timeout = 0.5 + self.event_processed_at = None + self.event_results: dict[str, EventResult] = {} + self._cancelled_due_to_error: BaseException | None = None + + def event_cancel_pending_child_processing(self, error: BaseException) -> None: + self._cancelled_due_to_error = error + + +@pytest.mark.asyncio +async def test_event_result_execute_without_base_event() -> None: + """EventResult should execute without requiring a real BaseEvent or EventBus.""" + + stub_event = _StubEvent() + + event_result = EventResult( + event_id=str(uuid4()), + handler_id=str(id(lambda: None)), + 
handler_name='handler', + eventbus_id=str(id(object())), + eventbus_name='Standalone', + timeout=stub_event.event_timeout, + result_type=str, + ) + + async def handler(event: _StubEvent) -> str: + return 'ok' + + test_bus = EventBus(name='StandaloneTest1') + result_value = await event_result.execute( + cast(BaseEvent[Any], stub_event), + cast(EventHandler, handler), + eventbus=test_bus, + timeout=stub_event.event_timeout, + ) + + assert result_value == 'ok' + assert event_result.status == 'completed' + assert event_result.result == 'ok' + assert stub_event.__dict__.get('_cancelled_due_to_error') is None + await test_bus.stop() + + +class StandaloneEvent(BaseEvent[str]): + data: str + + +@pytest.mark.asyncio +async def test_event_and_result_without_eventbus() -> None: + """Verify BaseEvent + EventResult work without instantiating an EventBus.""" + + event = StandaloneEvent(data='message') + + def handler(evt: StandaloneEvent) -> str: + return evt.data.upper() + + handler_id = get_handler_id(cast(EventHandler, handler), None) + pending_results = event.event_create_pending_results({handler_id: cast(EventHandler, handler)}) + event_result = pending_results[handler_id] + + test_bus = EventBus(name='StandaloneTest2') + value = await event_result.execute( + event, + cast(EventHandler, handler), + eventbus=test_bus, + timeout=event.event_timeout, + ) + + assert value == 'MESSAGE' + assert event_result.status == 'completed' + assert event.event_results[handler_id] is event_result + + event.event_mark_complete_if_all_handlers_completed() + assert event.event_completed_at is not None + await test_bus.stop() diff --git a/tests/test_eventbus.py b/tests/test_eventbus.py index b4cb977..f90c0b1 100644 --- a/tests/test_eventbus.py +++ b/tests/test_eventbus.py @@ -17,14 +17,16 @@ import asyncio import json import os +import sqlite3 import time -from datetime import datetime, timezone +from datetime import datetime, timezone, timedelta from typing import Any import pytest from 
pydantic import Field -from bubus import BaseEvent, EventBus +from bubus import BaseEvent, EventBus, SQLiteHistoryMirrorMiddleware +from bubus.middlewares import EventBusMiddleware, LoggerEventBusMiddleware, WALEventBusMiddleware class CreateAgentTaskEvent(BaseEvent): @@ -162,6 +164,31 @@ def test_emit_sync(self, mock_agent): assert 'no event loop is running' in str(e.value) assert len(bus.event_history) == 0 + async def test_unbounded_history_disables_capacity_limit(self): + """When max_history_size=None, dispatch should not enforce the 100-event cap.""" + bus = EventBus(name='NoLimitBus', max_history_size=None) + + processed = 0 + + async def slow_handler(event: BaseEvent) -> None: + nonlocal processed + await asyncio.sleep(0.01) + processed += 1 + + bus.on('SlowEvent', slow_handler) + + events: list[BaseEvent] = [] + + try: + for _ in range(150): + events.append(bus.dispatch(BaseEvent(event_type='SlowEvent'))) + + await asyncio.gather(*events) + await bus.wait_until_idle() + assert processed == 150 + finally: + await bus.stop(clear=True) + class TestHandlerRegistration: """Test handler registration and execution""" @@ -291,10 +318,14 @@ def static_method_handler(event: UserActionEvent) -> str: processor1 = EventProcessor('Processor1', 10) processor2 = EventProcessor('Processor2', 20) - # Register instance methods + # Register instance methods (suppress warning about same-named handlers from different instances) + import warnings + eventbus.on(UserActionEvent, processor1.sync_method_handler) eventbus.on(UserActionEvent, processor1.async_method_handler) - eventbus.on(UserActionEvent, processor2.sync_method_handler) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) + eventbus.on(UserActionEvent, processor2.sync_method_handler) # Register class and static methods eventbus.on('UserActionEvent', EventProcessor.class_method_handler) @@ -337,6 +368,56 @@ def static_method_handler(event: UserActionEvent) -> str: assert 'Handled by static 
method' in results_list +class TestEventForwarding: + """Tests for event forwarding between buses.""" + + @pytest.mark.asyncio + async def test_forwarding_loop_prevention(self): + bus_a = EventBus(name='ForwardBusA') + bus_b = EventBus(name='ForwardBusB') + bus_c = EventBus(name='ForwardBusC') + + class LoopEvent(BaseEvent[str]): + pass + + seen: dict[str, int] = {'A': 0, 'B': 0, 'C': 0} + + async def handler_a(event: LoopEvent) -> str: + seen['A'] += 1 + return 'handled-a' + + async def handler_b(event: LoopEvent) -> str: + seen['B'] += 1 + return 'handled-b' + + async def handler_c(event: LoopEvent) -> str: + seen['C'] += 1 + return 'handled-c' + + bus_a.on(LoopEvent, handler_a) + bus_b.on(LoopEvent, handler_b) + bus_c.on(LoopEvent, handler_c) + + # Create a forwarding cycle A -> B -> C -> A, which should be broken automatically. + bus_a.on('*', bus_b.dispatch) + bus_b.on('*', bus_c.dispatch) + bus_c.on('*', bus_a.dispatch) + + try: + event = await bus_a.dispatch(LoopEvent()) + + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + await bus_c.wait_until_idle() + + assert seen == {'A': 1, 'B': 1, 'C': 1} + assert event.event_path == ['ForwardBusA', 'ForwardBusB', 'ForwardBusC'] + finally: + await bus_a.stop(clear=True) + await bus_b.stop(clear=True) + await bus_c.stop(clear=True) + + class TestFIFOOrdering: """Test FIFO event processing""" @@ -402,6 +483,97 @@ async def working_handler(event: BaseEvent) -> str: assert working_result.result == 'worked' assert results == ['success'] + async def test_raise_if_errors_raises_exception_group_with_all_handler_errors(self, eventbus): + """raise_if_errors() should aggregate all handler failures into ExceptionGroup.""" + + async def failing_handler_one(event: BaseEvent) -> str: + raise ValueError('first failure') + + async def failing_handler_two(event: BaseEvent) -> str: + raise RuntimeError('second failure') + + async def working_handler(event: BaseEvent) -> str: + return 'worked' + + 
eventbus.on('UserActionEvent', failing_handler_one) + eventbus.on('UserActionEvent', failing_handler_two) + eventbus.on('UserActionEvent', working_handler) + + event = await eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) + + with pytest.raises(ExceptionGroup) as exc_info: + await event.raise_if_errors() + + grouped_errors = exc_info.value.exceptions + assert len(grouped_errors) == 2 + assert {type(err) for err in grouped_errors} == {ValueError, RuntimeError} + assert {'first failure', 'second failure'} == {str(err) for err in grouped_errors} + + async def test_raise_if_errors_waits_for_completion(self, eventbus): + """raise_if_errors() should wait for completion when called on pending events.""" + handler_started = asyncio.Event() + + async def delayed_failure(event: BaseEvent) -> str: + handler_started.set() + await asyncio.sleep(0.02) + raise ValueError('delayed failure') + + eventbus.on('UserActionEvent', delayed_failure) + + event = eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) + await handler_started.wait() + + with pytest.raises(ExceptionGroup) as exc_info: + await event.raise_if_errors(timeout=1) + + assert len(exc_info.value.exceptions) == 1 + assert isinstance(exc_info.value.exceptions[0], ValueError) + + async def test_raise_if_errors_noop_when_no_errors(self, eventbus): + """raise_if_errors() should return normally when no handler failed.""" + + async def working_handler(event: BaseEvent) -> str: + return 'ok' + + eventbus.on('UserActionEvent', working_handler) + + event = await eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) + await event.raise_if_errors() + + async def test_event_result_raises_exception_group_when_multiple_handlers_fail(self, eventbus): + """event_result() should raise ExceptionGroup when multiple handler failures exist.""" + + async def failing_handler_one(event: BaseEvent) -> str: + raise ValueError('first failure') + + async def failing_handler_two(event: BaseEvent) -> str: + raise 
RuntimeError('second failure') + + eventbus.on('UserActionEvent', failing_handler_one) + eventbus.on('UserActionEvent', failing_handler_two) + + event = await eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) + + with pytest.raises(ExceptionGroup) as exc_info: + await event.event_result() + + grouped_errors = exc_info.value.exceptions + assert len(grouped_errors) == 2 + assert {type(err) for err in grouped_errors} == {ValueError, RuntimeError} + + async def test_event_result_single_handler_error_raises_original_exception(self, eventbus): + """event_result() should preserve original exception type when only one handler fails.""" + + async def failing_handler(event: BaseEvent) -> str: + raise ValueError('single failure') + + eventbus.on('UserActionEvent', failing_handler) + + event = await eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) + + with pytest.raises(ValueError, match='single failure'): + await event.event_result() + class TestBatchOperations: """Test batch event operations""" @@ -694,7 +866,7 @@ async def test_wal_persistence_handler(self, tmp_path): """Test that events are automatically persisted to WAL file""" # Create event bus with WAL path wal_path = tmp_path / 'test_events.jsonl' - bus = EventBus(name='TestBus', wal_path=wal_path) + bus = EventBus(name='TestBus', middlewares=[WALEventBusMiddleware(wal_path)]) try: # Emit some events @@ -734,7 +906,7 @@ async def test_wal_persistence_creates_parent_dir(self, tmp_path): assert not wal_path.parent.exists() # Create event bus - bus = EventBus(name='TestBus', wal_path=wal_path) + bus = EventBus(name='TestBus', middlewares=[WALEventBusMiddleware(wal_path)]) try: # Emit an event @@ -755,7 +927,7 @@ async def test_wal_persistence_creates_parent_dir(self, tmp_path): async def test_wal_persistence_skips_incomplete_events(self, tmp_path): """Test that WAL persistence only writes completed events""" wal_path = tmp_path / 'incomplete_events.jsonl' - bus = EventBus(name='TestBus', 
wal_path=wal_path) + bus = EventBus(name='TestBus', middlewares=[WALEventBusMiddleware(wal_path)]) try: # Add a slow handler that will delay completion @@ -789,6 +961,172 @@ async def slow_handler(event: BaseEvent) -> str: await bus.stop() +class TestHandlerMiddleware: + """Tests for the handler middleware pipeline.""" + + async def test_middleware_wraps_successful_handler(self): + calls: list[tuple[str, str]] = [] + + class TrackingMiddleware(EventBusMiddleware): + def __init__(self, call_log: list[tuple[str, str]]): + self.call_log = call_log + + async def on_event_result_change(self, eventbus: EventBus, event: BaseEvent, event_result, status): + if status == 'started': + self.call_log.append(('before', event_result.status)) + elif status == 'completed': + self.call_log.append(('after', event_result.status)) + + bus = EventBus(middlewares=[TrackingMiddleware(calls)]) + bus.on('UserActionEvent', lambda event: 'ok') + + try: + completed = await bus.dispatch(UserActionEvent(action='test', user_id='user1')) + await bus.wait_until_idle() + + assert completed.event_results + result = next(iter(completed.event_results.values())) + assert result.status == 'completed' + assert result.result == 'ok' + assert calls == [('before', 'started'), ('after', 'completed')] + finally: + await bus.stop() + + async def test_middleware_observes_handler_errors(self): + observations: list[tuple[str, str]] = [] + + class ErrorMiddleware(EventBusMiddleware): + def __init__(self, log: list[tuple[str, str]]): + self.log = log + + async def on_event_result_change(self, eventbus: EventBus, event: BaseEvent, event_result, status): + if status == 'started': + self.log.append(('before', event_result.status)) + elif status == 'completed' and event_result.error: + self.log.append(('error', type(event_result.error).__name__)) + + async def failing_handler(event: BaseEvent) -> None: + raise ValueError('boom') + + bus = EventBus(middlewares=[ErrorMiddleware(observations)]) + bus.on('UserActionEvent', 
failing_handler) + + try: + event = await bus.dispatch(UserActionEvent(action='fail', user_id='user2')) + await bus.wait_until_idle() + + result = next(iter(event.event_results.values())) + assert result.status == 'error' + assert isinstance(result.error, ValueError) + assert observations == [('before', 'started'), ('error', 'ValueError')] + finally: + await bus.stop() + + +class TestSQLiteHistoryMirror: + async def test_sqlite_history_persists_events_and_results(self, tmp_path): + db_path = tmp_path / 'events.sqlite' + middleware = SQLiteHistoryMirrorMiddleware(db_path) + bus = EventBus(middlewares=[middleware]) + + async def handler(event: BaseEvent) -> str: + return 'ok' + + bus.on('UserActionEvent', handler) + + try: + await bus.dispatch(UserActionEvent(action='ping', user_id='u-1')) + await bus.wait_until_idle() + + conn = sqlite3.connect(db_path) + events = conn.execute( + 'SELECT phase, event_status FROM events_log ORDER BY id' + ).fetchall() + assert [phase for phase, _ in events] == ['pending', 'started', 'completed'] + assert [status for _, status in events] == ['pending', 'started', 'completed'] + + result_rows = conn.execute( + 'SELECT phase, status, result_repr, error_repr FROM event_results_log ORDER BY id' + ).fetchall() + conn.close() + + assert [phase for phase, *_ in result_rows] == ['pending', 'started', 'completed'] + assert [status for _, status, *_ in result_rows] == ['pending', 'started', 'completed'] + assert result_rows[-1][2] == "'ok'" + assert result_rows[-1][3] is None + finally: + await bus.stop() + + +class TestLoggerMiddleware: + async def test_logger_middleware_writes_file(self, tmp_path): + log_path = tmp_path / 'events.log' + bus = EventBus(middlewares=[LoggerEventBusMiddleware(log_path)]) + + async def handler(event: BaseEvent) -> str: + return 'logged' + + bus.on('UserActionEvent', handler) + + try: + await bus.dispatch(UserActionEvent(action='log', user_id='user')) + await bus.wait_until_idle() + + assert log_path.exists() + 
contents = log_path.read_text().strip().splitlines() + assert contents + assert 'UserActionEvent' in contents[-1] + finally: + await bus.stop() + + async def test_logger_middleware_stdout_only(self, capsys): + bus = EventBus(middlewares=[LoggerEventBusMiddleware()]) + + async def handler(event: BaseEvent) -> str: + return 'stdout' + + bus.on('UserActionEvent', handler) + + try: + await bus.dispatch(UserActionEvent(action='log', user_id='user')) + await bus.wait_until_idle() + + captured = capsys.readouterr() + assert 'UserActionEvent' in captured.out + assert 'stdout' not in captured.err + finally: + await bus.stop() + + async def test_sqlite_history_records_errors(self, tmp_path): + db_path = tmp_path / 'events.sqlite' + middleware = SQLiteHistoryMirrorMiddleware(db_path) + bus = EventBus(middlewares=[middleware]) + + async def failing_handler(event: BaseEvent) -> None: + raise RuntimeError('handler boom') + + bus.on('UserActionEvent', failing_handler) + + try: + await bus.dispatch(UserActionEvent(action='boom', user_id='u-2')) + await bus.wait_until_idle() + + conn = sqlite3.connect(db_path) + result_rows = conn.execute( + 'SELECT phase, status, error_repr FROM event_results_log ORDER BY id' + ).fetchall() + events = conn.execute('SELECT phase, event_status FROM events_log ORDER BY id').fetchall() + conn.close() + + assert [phase for phase, *_ in result_rows] == ['pending', 'started', 'completed'] + assert [status for _, status, *_ in result_rows] == ['pending', 'started', 'error'] + assert 'RuntimeError' in result_rows[-1][2] + assert [phase for phase, _ in events] == ['pending', 'started', 'completed'] + assert [status for _, status in events] == ['pending', 'started', 'completed'] + finally: + await bus.stop() + + class TestEventBusHierarchy: """Test hierarchical EventBus subscription patterns""" @@ -1014,8 +1352,8 @@ async def test_expect_with_predicate(self, eventbus): async def test_expect_timeout(self, eventbus): """Test expect timeout behavior""" # Expect 
an event that will never come - with pytest.raises(TimeoutError): - await eventbus.expect('NonExistentEvent', timeout=0.1) + result = await eventbus.expect('NonExistentEvent', timeout=0.1) + assert result is None async def test_expect_with_model_class(self, eventbus): """Test expect with model class instead of string""" @@ -1065,10 +1403,8 @@ async def test_expect_handler_cleanup(self, eventbus): initial_handlers = len(eventbus.handlers.get('TestEvent', [])) # Create an expect that times out - try: - await eventbus.expect('TestEvent', timeout=0.1) - except TimeoutError: - pass + result = await eventbus.expect('TestEvent', timeout=0.1) + assert result is None # Handler should be cleaned up assert len(eventbus.handlers.get('TestEvent', [])) == initial_handlers @@ -1106,12 +1442,107 @@ async def slow_handler(event: BaseEvent) -> str: # Wait for expect received = await expect_task - # At this point, the slow handler should have run - # but we receive the event as soon as it matches assert received.event_type == 'SlowEvent' - # The event might not be fully completed yet since expect - # triggers as soon as the event is processed by its handler + assert processing_complete is False + # Slow handler should still be running (or pending) when expect() resolves + slow_result = next( + (res for res in received.event_results.values() if res.handler_name.endswith('slow_handler')), + None, + ) + assert slow_result is not None + assert slow_result.status != 'completed' + + await eventbus.wait_until_idle() + assert processing_complete is True + + +class TestQueryMethod: + """Tests for the query() helper.""" + + async def test_query_returns_most_recent_completed(self, eventbus): + # Dispatch two events and ensure the newest is returned + eventbus.dispatch(UserActionEvent(action='first', user_id='u1')) + latest = eventbus.dispatch(UserActionEvent(action='second', user_id='u2')) + await eventbus.wait_until_idle() + + match = await eventbus.query('UserActionEvent', 
since=timedelta(seconds=10)) + assert match is not None + assert match.event_id == latest.event_id + + async def test_query_respects_since_window(self, eventbus): + event = eventbus.dispatch(UserActionEvent(action='old', user_id='u1')) + await eventbus.wait_until_idle() + event.event_created_at -= timedelta(seconds=30) + + match = await eventbus.query('UserActionEvent', since=timedelta(seconds=10)) + assert match is None + + async def test_query_skips_incomplete_events(self, eventbus): + processing = asyncio.Event() + + async def slow_handler(evt: UserActionEvent) -> None: + await asyncio.sleep(0.05) + processing.set() + + eventbus.on('UserActionEvent', slow_handler) + + pending_event = eventbus.dispatch(UserActionEvent(action='slow', user_id='u1')) + + # While the handler is running, query should return None + assert await eventbus.query('UserActionEvent', since=timedelta(seconds=10)) is None + + await pending_event + await processing.wait() + + match = await eventbus.query('UserActionEvent', since=timedelta(seconds=10)) + assert match is not None + assert match.event_id == pending_event.event_id + + +class TestDebouncePatterns: + """End-to-end scenarios for debounce-style flows.""" + + class DebounceEvent(BaseEvent): + user_id: int + + async def test_debounce_prefers_recent_history(self, eventbus): + # First event completes + initial = await eventbus.dispatch(self.DebounceEvent(user_id=123)) + await eventbus.wait_until_idle() + + # Compose the debounce pattern: query -> expect -> dispatch + resolved = ( + await eventbus.query(self.DebounceEvent, since=timedelta(seconds=10)) + or await eventbus.expect(self.DebounceEvent, timeout=0.05) + or await eventbus.dispatch(self.DebounceEvent(user_id=123)) + ) + + assert resolved is not None + assert resolved.event_id == initial.event_id + + total_events = sum( + 1 for event in eventbus.event_history.values() if isinstance(event, self.DebounceEvent) + ) + assert total_events == 1 + + async def 
test_debounce_dispatches_when_recent_missing(self, eventbus): + resolved = ( + await eventbus.query(self.DebounceEvent, since=timedelta(seconds=1)) + or await eventbus.expect(self.DebounceEvent, timeout=0.05) + or await eventbus.dispatch(self.DebounceEvent(user_id=999)) + ) + + assert resolved is not None + assert isinstance(resolved, self.DebounceEvent) + assert resolved.user_id == 999 + + await eventbus.wait_until_idle() + + total_events = sum( + 1 for event in eventbus.event_history.values() if isinstance(event, self.DebounceEvent) + ) + assert total_events == 1 async def test_expect_with_complex_predicate(self, eventbus): """Test expect with complex predicate logic""" events_seen = [] @@ -1151,6 +1582,7 @@ async def test_expect_in_sync_context(self, mock_agent): # Later await the coroutine result = await expect_coroutine + assert result is not None assert result.event_type == 'SyncEvent' await bus.stop() @@ -1339,6 +1771,25 @@ async def bad_handler(event): merged_bad = await event_bad.event_results_flat_dict() assert merged_bad == {} # Empty dict since no dict results + async def test_flat_dict_conflict_raises(self, eventbus): + """event_results_flat_dict() raises by default when handlers conflict.""" + + async def handler_one(event): + return {'shared': 1, 'unique1': 'a'} + + async def handler_two(event): + return {'shared': 2, 'unique2': 'b'} + + eventbus.on('ConflictEvent', handler_one) + eventbus.on('ConflictEvent', handler_two) + + event = await eventbus.dispatch(BaseEvent(event_type='ConflictEvent')) + + with pytest.raises(ValueError) as exc_info: + await event.event_results_flat_dict() + + assert 'overwrite values from previous handlers' in str(exc_info.value) + async def test_flat_list(self, eventbus): """Test event_results_flat_list() concatenation""" diff --git a/tests/test_find.py b/tests/test_find.py new file mode 100644 index 0000000..bce08d7 --- /dev/null +++ b/tests/test_find.py @@ -0,0 +1,1507 @@ +""" +Tests for the unified find() method and 
tree traversal helpers. + +Addresses GitHub Issues #10 (debouncing) and #15 (expect past + child_of). +""" + +# pyright: reportUnknownMemberType=false +# pyright: reportUnknownLambdaType=false +# pyright: reportAttributeAccessIssue=false +# pyright: reportUnknownVariableType=false +# pyright: reportUnusedVariable=false + +import asyncio +from datetime import UTC, datetime + +import pytest + +from bubus import BaseEvent, EventBus + + +# Test event types +class ParentEvent(BaseEvent[str]): + pass + + +class ChildEvent(BaseEvent[str]): + pass + + +class GrandchildEvent(BaseEvent[str]): + pass + + +class UnrelatedEvent(BaseEvent[str]): + pass + + +class ScreenshotEvent(BaseEvent[str]): + """Example event for debouncing tests.""" + + target_id: str = '' + full_page: bool = False + + +class NavigateEvent(BaseEvent[str]): + """Example event for race condition tests.""" + + url: str = '' + + +class TabCreatedEvent(BaseEvent[str]): + """Example event that fires as result of navigation.""" + + tab_id: str = '' + + +# ============================================================================= +# Tree Traversal Helper Tests +# ============================================================================= + + +class TestEventIsChildOf: + """Tests for event_is_child_of() method.""" + + async def test_direct_child_returns_true(self): + """event_is_child_of returns True for direct parent-child relationship.""" + bus = EventBus() + + try: + # Create parent-child relationship via dispatch inside handler + child_event_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.dispatch(ChildEvent()) + child_event_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + child = child_event_ref[0] + + # Verify the relationship + assert bus.event_is_child_of(child, parent) is True + + finally: + 
await bus.stop(clear=True) + + async def test_grandchild_returns_true(self): + """event_is_child_of returns True for grandparent relationship.""" + bus = EventBus() + + try: + grandchild_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + await bus.dispatch(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + grandchild = await bus.dispatch(GrandchildEvent()) + grandchild_ref.append(grandchild) + return 'child_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, child_handler) + bus.on(GrandchildEvent, lambda e: 'grandchild_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + grandchild = grandchild_ref[0] + + # Grandchild should be descendant of parent + assert bus.event_is_child_of(grandchild, parent) is True + + finally: + await bus.stop(clear=True) + + async def test_unrelated_events_returns_false(self): + """event_is_child_of returns False for unrelated events.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'parent_done') + bus.on(UnrelatedEvent, lambda e: 'unrelated_done') + + parent = await bus.dispatch(ParentEvent()) + unrelated = await bus.dispatch(UnrelatedEvent()) + + assert bus.event_is_child_of(unrelated, parent) is False + + finally: + await bus.stop(clear=True) + + async def test_same_event_returns_false(self): + """event_is_child_of returns False when checking event against itself.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + event = await bus.dispatch(ParentEvent()) + + assert bus.event_is_child_of(event, event) is False + + finally: + await bus.stop(clear=True) + + async def test_reversed_relationship_returns_false(self): + """event_is_child_of returns False when parent/child are reversed.""" + bus = EventBus() + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.dispatch(ChildEvent()) + child_ref.append(child) + 
return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + child = child_ref[0] + + # Parent is NOT a child of child + assert bus.event_is_child_of(parent, child) is False + + finally: + await bus.stop(clear=True) + + +class TestEventIsParentOf: + """Tests for event_is_parent_of() method.""" + + async def test_direct_parent_returns_true(self): + """event_is_parent_of returns True for direct parent-child relationship.""" + bus = EventBus() + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.dispatch(ChildEvent()) + child_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + child = child_ref[0] + + # Parent IS parent of child + assert bus.event_is_parent_of(parent, child) is True + + finally: + await bus.stop(clear=True) + + async def test_grandparent_returns_true(self): + """event_is_parent_of returns True for grandparent relationship.""" + bus = EventBus() + + try: + grandchild_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + await bus.dispatch(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + grandchild = await bus.dispatch(GrandchildEvent()) + grandchild_ref.append(grandchild) + return 'child_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, child_handler) + bus.on(GrandchildEvent, lambda e: 'grandchild_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + grandchild = grandchild_ref[0] + + # Parent IS ancestor of grandchild + assert bus.event_is_parent_of(parent, grandchild) is True + + finally: + await bus.stop(clear=True) + + +# 
============================================================================= +# find() Basic Functionality Tests +# ============================================================================= + + +class TestFindPastOnly: + """Tests for find(past=True, future=False) - equivalent to query().""" + + async def test_returns_matching_event_from_history(self): + """find(past=True, future=False) returns event from history.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + dispatched = await bus.dispatch(ParentEvent()) + + # Find it in history (past=True = search all history) + found = await bus.find(ParentEvent, past=True, future=False) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_float_filters_by_time_window(self): + """find(past=0.1) only returns events from last 0.1 seconds.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an event + _old_event = await bus.dispatch(ParentEvent()) + + # Wait a bit + await asyncio.sleep(0.15) + + # Dispatch another event + new_event = await bus.dispatch(ParentEvent()) + + # With a very short past window, should only find the new event + found = await bus.find(ParentEvent, past=0.1, future=False) + assert found is not None + assert found.event_id == new_event.event_id + + # With a longer past window, should still find new event (most recent first) + found = await bus.find(ParentEvent, past=1.0, future=False) + assert found is not None + assert found.event_id == new_event.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_float_returns_none_when_all_events_too_old(self): + """find(past=0.05) returns None if all events are older than 0.05 seconds.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an event + await bus.dispatch(ParentEvent()) + + # Wait longer than our window + await 
asyncio.sleep(0.15) + + # With very short past window, should find nothing + found = await bus.find(ParentEvent, past=0.05, future=False) + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_returns_none_when_no_match(self): + """find(past=True, future=False) returns None when no matching event.""" + bus = EventBus() + + try: + # No events dispatched + found = await bus.find(ParentEvent, past=True, future=False) + + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_respects_where_filter(self): + """find() applies where filter correctly.""" + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # Dispatch two events with different target_ids + await bus.dispatch(ScreenshotEvent(target_id='tab1')) + event2 = await bus.dispatch(ScreenshotEvent(target_id='tab2')) + + # Find only the one with target_id='tab2' + found = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab2', + past=True, + future=False, + ) + + assert found is not None + assert found.event_id == event2.event_id + + finally: + await bus.stop(clear=True) + + async def test_returns_most_recent_match(self): + """find() returns most recent matching event from history.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch multiple events + await bus.dispatch(ParentEvent()) + await asyncio.sleep(0.01) # Ensure different timestamps + event2 = await bus.dispatch(ParentEvent()) + + # Should return the most recent + found = await bus.find(ParentEvent, past=True, future=False) + + assert found is not None + assert found.event_id == event2.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_ignores_in_progress_until_event_completes(self): + """History search should only return completed events, never in-progress ones.""" + bus = EventBus() + + try: + release_handler = asyncio.Event() + + async def slow_handler(event: ParentEvent) -> str: + await 
release_handler.wait() + return 'done' + + bus.on(ParentEvent, slow_handler) + + dispatched = bus.dispatch(ParentEvent()) + await asyncio.sleep(0.02) # Let handler start. + + # In-progress event should not be returned by history search. + found_while_running = await bus.find(ParentEvent, past=True, future=False) + assert found_while_running is None + + release_handler.set() + await dispatched + await bus.wait_until_idle() + + found_after_completion = await bus.find(ParentEvent, past=True, future=False) + assert found_after_completion is not None + assert found_after_completion.event_id == dispatched.event_id + finally: + await bus.stop(clear=True) + + +class TestFindFutureOnly: + """Tests for find(past=False, future=...) - equivalent to expect().""" + + async def test_waits_for_future_event(self): + """find(past=False, future=1) waits for event to be dispatched.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Start waiting for event + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.dispatch(ParentEvent()) + + find_task = asyncio.create_task( + bus.find(ParentEvent, past=False, future=1) + ) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_future_float_timeout(self): + """find(future=0.01) times out quickly when no event.""" + bus = EventBus() + + try: + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=False, future=0.01) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is None + assert elapsed < 0.1 # Should timeout quickly + + finally: + await bus.stop(clear=True) + + async def test_ignores_past_events(self): + """find(past=False, future=...) 
ignores events already in history.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + await bus.dispatch(ParentEvent()) + + # Should NOT find it (past=False), and timeout quickly + found = await bus.find(ParentEvent, past=False, future=0.01) + + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_future_works_with_string_event_type(self): + """find('EventName', ...) resolves using string keys, not just model classes.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.dispatch(ParentEvent()) + + find_task = asyncio.create_task(bus.find('ParentEvent', past=False, future=1)) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + assert found is not None + assert found.event_id == dispatched.event_id + assert found.event_type == 'ParentEvent' + finally: + await bus.stop(clear=True) + + async def test_multiple_concurrent_find_waiters_resolve_correct_events(self): + """Concurrent find() waiters should each resolve to the correct event.""" + bus = EventBus() + + try: + # Keep one permanent handler so we can assert temporary find handlers are cleaned up. 
+ bus.on(ScreenshotEvent, lambda e: 'done') + baseline_handler_count = len(bus.handlers.get('ScreenshotEvent', [])) + + wait_for_a = asyncio.create_task( + bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab-a', + past=False, + future=1, + ) + ) + wait_for_b = asyncio.create_task( + bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab-b', + past=False, + future=1, + ) + ) + + await asyncio.sleep(0.02) + event_a = await bus.dispatch(ScreenshotEvent(target_id='tab-a')) + event_b = await bus.dispatch(ScreenshotEvent(target_id='tab-b')) + + found_a, found_b = await asyncio.gather(wait_for_a, wait_for_b) + + assert found_a is not None + assert found_b is not None + assert found_a.event_id == event_a.event_id + assert found_b.event_id == event_b.event_id + + # All temporary find handlers should be removed. + assert len(bus.handlers.get('ScreenshotEvent', [])) == baseline_handler_count + finally: + await bus.stop(clear=True) + + async def test_find_future_resolves_before_handlers_complete(self): + """find(future=...) 
resolves on dispatch, before slow handlers complete.""" + bus = EventBus() + + try: + processing_complete = False + + async def slow_handler(event: ParentEvent) -> str: + nonlocal processing_complete + await asyncio.sleep(0.1) + processing_complete = True + return 'done' + + bus.on(ParentEvent, slow_handler) + + find_task = asyncio.create_task(bus.find(ParentEvent, past=False, future=1)) + await asyncio.sleep(0.01) + + dispatched = bus.dispatch(ParentEvent()) + found = await find_task + + assert found is not None + assert found.event_id == dispatched.event_id + assert processing_complete is False + assert found.event_status in ('pending', 'started') + + await bus.wait_until_idle() + assert processing_complete is True + finally: + await bus.stop(clear=True) + + +class TestFindNeitherPastNorFuture: + """Tests for find(past=False, future=False) - should return None.""" + + async def test_returns_none_immediately(self): + """find(past=False, future=False) returns None immediately.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event + await bus.dispatch(ParentEvent()) + + # With both past and future disabled, should return None + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=False, future=False) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is None + assert elapsed < 0.1 # Should be instant + + finally: + await bus.stop(clear=True) + + +class TestFindPastAndFuture: + """Tests for find(past=..., future=...) 
- combined search.""" + + async def test_returns_past_event_immediately(self): + """find(past=True, future=5) returns past event without waiting.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + dispatched = await bus.dispatch(ParentEvent()) + + # Should find it immediately from history + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=True, future=5) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is not None + assert found.event_id == dispatched.event_id + assert elapsed < 0.1 # Should be nearly instant + + finally: + await bus.stop(clear=True) + + async def test_waits_for_future_when_no_past_match(self): + """find(past=True, future=1) waits for future if no past match.""" + bus = EventBus() + + try: + bus.on(ChildEvent, lambda e: 'done') + + # Different event type in history + bus.on(ParentEvent, lambda e: 'done') + await bus.dispatch(ParentEvent()) + + # Start waiting for ChildEvent (not in history) + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.dispatch(ChildEvent()) + + find_task = asyncio.create_task( + bus.find(ChildEvent, past=True, future=1) + ) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_and_future_independent_control(self): + """past=0.05, future=0.05 uses different windows for each.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an old event + await bus.dispatch(ParentEvent()) + await asyncio.sleep(0.15) + + # With short past window (0.05s), old event won't be found + # With short future window (0.05s), will timeout + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=0.05, future=0.05) + elapsed = (datetime.now(UTC) - 
start).total_seconds() + + assert found is None + # Should have waited ~0.05s for future + assert 0.04 < elapsed < 0.15 + + finally: + await bus.stop(clear=True) + + async def test_past_true_future_float(self): + """past=True searches all history, future=0.1 waits up to 0.1s.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an old event + dispatched = await bus.dispatch(ParentEvent()) + await asyncio.sleep(0.15) + + # past=True should find the old event (no time window) + found = await bus.find(ParentEvent, past=True, future=0.1) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_float_future_true_would_wait_forever(self): + """past=0.05 with old events + future=True - verify past window works.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an old event + await bus.dispatch(ParentEvent()) + await asyncio.sleep(0.15) + + # past=0.05 won't find old event, but we dispatch a new one + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.dispatch(ParentEvent()) + + find_task = asyncio.create_task( + bus.find(ParentEvent, past=0.05, future=1) + ) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + # Should find the new event from future wait + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + +# ============================================================================= +# find() with child_of Tests +# ============================================================================= + + +class TestFindWithChildOf: + """Tests for find() with child_of parameter.""" + + async def test_returns_child_of_specified_parent(self): + """find(child_of=parent) returns event that is child of parent.""" + bus = EventBus() + + try: + child_ref: 
list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.dispatch(ChildEvent()) + child_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + # Find child of parent + found = await bus.find(ChildEvent, child_of=parent, past=True, future=False) + + assert found is not None + assert found.event_id == child_ref[0].event_id + + finally: + await bus.stop(clear=True) + + async def test_returns_none_for_non_child(self): + """find(child_of=parent) returns None if event is not a child.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'parent_done') + bus.on(UnrelatedEvent, lambda e: 'unrelated_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.dispatch(UnrelatedEvent()) + + # Should not find UnrelatedEvent as child of parent + found = await bus.find( + UnrelatedEvent, child_of=parent, past=True, future=False + ) + + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_finds_grandchild(self): + """find(child_of=grandparent) returns grandchild event.""" + bus = EventBus() + + try: + grandchild_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + await bus.dispatch(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + grandchild = await bus.dispatch(GrandchildEvent()) + grandchild_ref.append(grandchild) + return 'child_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, child_handler) + bus.on(GrandchildEvent, lambda e: 'grandchild_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + # Find grandchild of parent + found = await bus.find( + GrandchildEvent, child_of=parent, past=True, future=False + ) + + assert found is not None + assert found.event_id == grandchild_ref[0].event_id + + finally: + await 
bus.stop(clear=True) + + async def test_child_of_works_across_forwarded_buses(self): + """find(child_of=parent) works when events are forwarded across buses.""" + main_bus = EventBus(name='MainBus') + auth_bus = EventBus(name='AuthBus') + + try: + child_ref: list[BaseEvent] = [] + + # Forward ParentEvent from main_bus to auth_bus + main_bus.on(ParentEvent, auth_bus.dispatch) + + # auth_bus handles ParentEvent and dispatches a ChildEvent + async def auth_handler(event: ParentEvent) -> str: + child = await auth_bus.dispatch(ChildEvent()) + child_ref.append(child) + return 'auth_done' + + auth_bus.on(ParentEvent, auth_handler) + auth_bus.on(ChildEvent, lambda e: 'child_done') + + # Dispatch on main_bus, which forwards to auth_bus + parent = await main_bus.dispatch(ParentEvent()) + await main_bus.wait_until_idle() + await auth_bus.wait_until_idle() + + # Find child event on auth_bus using parent from main_bus + found = await auth_bus.find( + ChildEvent, child_of=parent, past=5, future=5 + ) + + assert found is not None + assert found.event_id == child_ref[0].event_id + + finally: + await main_bus.stop(clear=True) + await auth_bus.stop(clear=True) + + +# ============================================================================= +# expect() Backwards Compatibility Tests +# ============================================================================= + + +class TestExpectBackwardsCompatibility: + """Tests to ensure expect() still works with old API.""" + + async def test_expect_waits_for_future_event(self): + """expect() still waits for future events (existing behavior).""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.dispatch(ParentEvent()) + + expect_task = asyncio.create_task(bus.expect(ParentEvent, timeout=1)) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(expect_task, dispatch_task) + + assert found 
is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_expect_with_include_filter(self): + """expect() with include parameter still works.""" + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + async def dispatch_events(): + await asyncio.sleep(0.02) + await bus.dispatch(ScreenshotEvent(target_id='wrong')) + await asyncio.sleep(0.02) + return await bus.dispatch(ScreenshotEvent(target_id='correct')) + + expect_task = asyncio.create_task( + bus.expect( + ScreenshotEvent, + include=lambda e: e.target_id == 'correct', + timeout=1, + ) + ) + dispatch_task = asyncio.create_task(dispatch_events()) + + found, dispatched = await asyncio.gather(expect_task, dispatch_task) + + assert found is not None + assert found.target_id == 'correct' + + finally: + await bus.stop(clear=True) + + async def test_expect_with_exclude_filter(self): + """expect() with exclude parameter still works.""" + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + async def dispatch_events(): + await asyncio.sleep(0.02) + await bus.dispatch(ScreenshotEvent(target_id='excluded')) + await asyncio.sleep(0.02) + return await bus.dispatch(ScreenshotEvent(target_id='included')) + + expect_task = asyncio.create_task( + bus.expect( + ScreenshotEvent, + exclude=lambda e: e.target_id == 'excluded', + timeout=1, + ) + ) + dispatch_task = asyncio.create_task(dispatch_events()) + + found, dispatched = await asyncio.gather(expect_task, dispatch_task) + + assert found is not None + assert found.target_id == 'included' + + finally: + await bus.stop(clear=True) + + async def test_expect_with_past_true(self): + """expect(past=True) finds already-dispatched events.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + dispatched = await bus.dispatch(ParentEvent()) + + # expect with past=True should find it + found = await bus.expect(ParentEvent, past=True, timeout=5) 
+ + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_expect_with_past_float(self): + """expect(past=5.0) searches last 5 seconds of history.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + dispatched = await bus.dispatch(ParentEvent()) + + # expect with past=5.0 should find recent event + found = await bus.expect(ParentEvent, past=5.0, timeout=1) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_expect_with_child_of(self): + """expect(child_of=parent) filters by parent relationship.""" + bus = EventBus() + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.dispatch(ChildEvent()) + child_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + # expect with child_of and past=True + found = await bus.expect(ChildEvent, child_of=parent, past=True, timeout=5) + + assert found is not None + assert found.event_id == child_ref[0].event_id + + finally: + await bus.stop(clear=True) + + +# ============================================================================= +# Debouncing Pattern Tests (Issue #10) +# ============================================================================= + + +class TestDebouncingPattern: + """Tests for the debouncing pattern: find() or dispatch().""" + + async def test_returns_existing_fresh_event(self): + """Pattern returns existing event when fresh.""" + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # Dispatch a screenshot + original = await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + # Use debouncing pattern - should return the existing event + is_fresh = lambda e: 
(datetime.now(UTC) - e.event_completed_at).seconds < 5 + result = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1' and is_fresh(e), + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + assert result.event_id == original.event_id + + finally: + await bus.stop(clear=True) + + async def test_dispatches_new_when_no_match(self): + """Pattern dispatches new event when no matching event in history.""" + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # No existing events - should dispatch new + result = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1', + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + assert result is not None + assert result.target_id == 'tab1' + assert result.event_status == 'completed' + + finally: + await bus.stop(clear=True) + + async def test_dispatches_new_when_stale(self): + """Pattern dispatches new event when existing is stale.""" + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # Dispatch an event + await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + # Filter that marks all events as stale + is_fresh = lambda e: False # Nothing is fresh + + result = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1' and is_fresh(e), + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + # Should be a new event (different ID) + assert result is not None + # Both events should be in history now + screenshots = [ + e for e in bus.event_history.values() if isinstance(e, ScreenshotEvent) + ] + assert len(screenshots) == 2 + + finally: + await bus.stop(clear=True) + + async def test_find_past_only_returns_immediately_without_waiting(self): + """find(past=True, future=False) returns immediately, never waits.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # No events in history - find 
should return None instantly + start = datetime.now(UTC) + result = await bus.find(ParentEvent, past=True, future=False) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert result is None + assert elapsed < 0.05 # Should be nearly instant (< 50ms) + + finally: + await bus.stop(clear=True) + + async def test_find_past_float_returns_immediately_without_waiting(self): + """find(past=5, future=False) returns immediately, never waits.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # No events in history - find should return None instantly + start = datetime.now(UTC) + result = await bus.find(ParentEvent, past=5, future=False) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert result is None + assert elapsed < 0.05 # Should be nearly instant (< 50ms) + + finally: + await bus.stop(clear=True) + + async def test_or_chain_without_waiting_finds_existing(self): + """Or-chain pattern finds existing events without blocking.""" + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # Dispatch first event + original = await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + # Or-chain should find existing event instantly + start = datetime.now(UTC) + result = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1', + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab1')) + elapsed = (datetime.now(UTC) - start).total_seconds() + + # Should return existing event + assert result.event_id == original.event_id + # Should be fast (no waiting) + assert elapsed < 0.1 + + finally: + await bus.stop(clear=True) + + async def test_or_chain_without_waiting_dispatches_when_no_match(self): + """Or-chain pattern dispatches new event when no match, still fast.""" + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # No matching events - should dispatch new one + start = datetime.now(UTC) + result = await bus.find( + ScreenshotEvent, + 
where=lambda e: e.target_id == 'tab1', + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab1')) + elapsed = (datetime.now(UTC) - start).total_seconds() + + # Should have dispatched new event + assert result is not None + assert result.target_id == 'tab1' + # Should be fast (find returned None immediately, then dispatch ran) + assert elapsed < 0.1 + + finally: + await bus.stop(clear=True) + + async def test_or_chain_multiple_sequential_lookups(self): + """Multiple or-chain lookups work without blocking.""" + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # Multiple sequential debouncing calls + start = datetime.now(UTC) + + # First call - dispatches new + result1 = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1', + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + # Second call - finds existing + result2 = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1', + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + # Third call - dispatches new (different target) + result3 = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab2', + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab2')) + + elapsed = (datetime.now(UTC) - start).total_seconds() + + # First two should be same event + assert result1.event_id == result2.event_id + # Third should be different + assert result3.event_id != result1.event_id + assert result3.target_id == 'tab2' + # All operations should be fast + assert elapsed < 0.2 + + finally: + await bus.stop(clear=True) + + async def test_find_without_await_is_a_coroutine(self): + """find() without await returns a coroutine that can be awaited.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Call find without await - should return a coroutine + coro = bus.find(ParentEvent, past=True, 
future=False) + + # Verify it's a coroutine + import inspect + + assert inspect.iscoroutine(coro) + + # Now await it + result = await coro + + assert result is None + + finally: + await bus.stop(clear=True) + + +# ============================================================================= +# Race Condition Fix Tests (Issue #15) +# ============================================================================= + + +class TestRaceConditionFix: + """Tests for the race condition fix where event fires before expect().""" + + async def test_find_catches_already_fired_event(self): + """find(past=True) catches event that fired before the call.""" + bus = EventBus() + + try: + tab_ref: list[BaseEvent] = [] + + async def navigate_handler(event: NavigateEvent) -> str: + # This synchronously creates the tab event + tab = await bus.dispatch(TabCreatedEvent(tab_id='new_tab')) + tab_ref.append(tab) + return 'navigate_done' + + bus.on(NavigateEvent, navigate_handler) + bus.on(TabCreatedEvent, lambda e: 'tab_created') + + # Dispatch navigation - tab event fires during handler + nav_event = await bus.dispatch(NavigateEvent(url='https://example.com')) + + # By now TabCreatedEvent has already fired + # Using find(past=True) should catch it + found = await bus.find( + TabCreatedEvent, child_of=nav_event, past=True, future=False + ) + + assert found is not None + assert found.event_id == tab_ref[0].event_id + + finally: + await bus.stop(clear=True) + + async def test_child_of_filters_to_correct_parent(self): + """child_of correctly filters to events from the right parent.""" + bus = EventBus() + + try: + async def navigate_handler(event: NavigateEvent) -> str: + await bus.dispatch(TabCreatedEvent(tab_id=f'tab_for_{event.url}')) + return 'navigate_done' + + bus.on(NavigateEvent, navigate_handler) + bus.on(TabCreatedEvent, lambda e: 'tab_created') + + # Two navigations, each creates a tab + nav1 = await bus.dispatch(NavigateEvent(url='site1')) + nav2 = await 
bus.dispatch(NavigateEvent(url='site2')) + + # Find tab created by nav1 specifically + tab1 = await bus.find( + TabCreatedEvent, child_of=nav1, past=True, future=False + ) + + # Find tab created by nav2 specifically + tab2 = await bus.find( + TabCreatedEvent, child_of=nav2, past=True, future=False + ) + + assert tab1 is not None + assert tab2 is not None + assert tab1.tab_id == 'tab_for_site1' + assert tab2.tab_id == 'tab_for_site2' + + finally: + await bus.stop(clear=True) + + +# ============================================================================= +# New Parameter Combination Tests +# ============================================================================= + + +class TestNewParameterCombinations: + """Tests for the new bool | float parameter combinations.""" + + async def test_past_true_future_false_searches_all_history(self): + """past=True, future=False searches all history instantly.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event and wait + dispatched = await bus.dispatch(ParentEvent()) + await asyncio.sleep(0.1) + + # Should find old event with past=True + found = await bus.find(ParentEvent, past=True, future=False) + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_float_future_false_filters_by_age(self): + """past=0.05, future=False only searches last 0.05 seconds.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event + await bus.dispatch(ParentEvent()) + await asyncio.sleep(0.1) # Make it old + + # past=0.05 means "events in last 0.05 seconds" = nothing old + found = await bus.find(ParentEvent, past=0.05, future=False) + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_past_false_future_float_waits_for_timeout(self): + """past=False, future=0.05 waits up to 0.05 seconds.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + 
+ start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=False, future=0.05) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is None + assert 0.04 < elapsed < 0.15 # Should wait ~0.05s + + finally: + await bus.stop(clear=True) + + async def test_past_true_future_true_searches_all_and_waits_forever(self): + """past=True, future=True searches all history, would wait forever.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an old event + dispatched = await bus.dispatch(ParentEvent()) + await asyncio.sleep(0.1) + + # past=True should find the old event immediately + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=True, future=True) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is not None + assert found.event_id == dispatched.event_id + assert elapsed < 0.1 # Should be instant (found in past) + + finally: + await bus.stop(clear=True) + + async def test_find_with_where_and_past_float(self): + """where filter combined with past=float works correctly.""" + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # Dispatch events with different target_ids + await bus.dispatch(ScreenshotEvent(target_id='tab1')) + await asyncio.sleep(0.15) + event2 = await bus.dispatch(ScreenshotEvent(target_id='tab2')) + + # Find with both where filter and past window + found = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab2', + past=0.1, # Only search last 0.1 seconds + future=False, + ) + assert found is not None + assert found.event_id == event2.event_id + + # tab1 is too old for the past window + found = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1', + past=0.1, + future=False, + ) + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_find_with_child_of_and_past_float(self): + """child_of filter combined with past=float works correctly.""" + bus = EventBus() + + 
try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.dispatch(ChildEvent()) + child_ref.append(child) + return 'done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + # Find child with past window - should work since event is fresh + found = await bus.find( + ChildEvent, + child_of=parent, + past=5, # 5 second window + future=False, + ) + assert found is not None + assert found.event_id == child_ref[0].event_id + + finally: + await bus.stop(clear=True) + + async def test_find_with_all_parameters(self): + """All parameters combined work correctly.""" + bus = EventBus() + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.dispatch(ScreenshotEvent(target_id='child_tab')) + child_ref.append(child) + return 'done' + + bus.on(ParentEvent, parent_handler) + bus.on(ScreenshotEvent, lambda e: 'done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + # Find with all parameters + found = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'child_tab', + child_of=parent, + past=5, + future=False, + ) + assert found is not None + assert found.event_id == child_ref[0].event_id + assert found.target_id == 'child_tab' + + finally: + await bus.stop(clear=True) + + +if __name__ == '__main__': + pytest.main([__file__, '-v', '-s']) diff --git a/tests/test_handler_timeout.py b/tests/test_handler_timeout.py index 3952429..8b7ba77 100644 --- a/tests/test_handler_timeout.py +++ b/tests/test_handler_timeout.py @@ -176,3 +176,206 @@ async def test_nested_timeout_scenario_from_issue(): # # assert 'ChildEvent' in str(exc_info.value) or 'ChildEvent' in str(exc_info.value) await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def 
test_handler_timeout_marks_error_and_other_handlers_still_complete(): + """Focused timeout behavior: one handler times out, another still completes.""" + bus = EventBus(name='TimeoutFocusedBus') + + class TimeoutFocusedEvent(BaseEvent[str]): + event_timeout: float | None = 0.01 + + execution_order: list[str] = [] + + async def slow_handler(event: TimeoutFocusedEvent) -> str: + execution_order.append('slow_start') + await asyncio.sleep(0.05) + execution_order.append('slow_end') + return 'slow' + + async def fast_handler(event: TimeoutFocusedEvent) -> str: + execution_order.append('fast_start') + return 'fast' + + bus.on(TimeoutFocusedEvent, slow_handler) + bus.on(TimeoutFocusedEvent, fast_handler) + + try: + event = await bus.dispatch(TimeoutFocusedEvent()) + await bus.wait_until_idle() + + slow_result = next((r for r in event.event_results.values() if r.handler_name.endswith('slow_handler')), None) + fast_result = next((r for r in event.event_results.values() if r.handler_name.endswith('fast_handler')), None) + + assert slow_result is not None + assert slow_result.status == 'error' + assert isinstance(slow_result.error, TimeoutError) + + assert fast_result is not None + assert fast_result.status == 'completed' + assert fast_result.result == 'fast' + assert 'fast_start' in execution_order + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_multi_bus_timeout_is_recorded_on_target_bus(): + """Closest Python equivalent: same event dispatched to two buses, timeout on target bus is captured.""" + bus_a = EventBus(name='MultiTimeoutA') + bus_b = EventBus(name='MultiTimeoutB') + + class MultiBusTimeoutEvent(BaseEvent[str]): + event_timeout: float | None = 0.01 + + async def slow_target_handler(event: MultiBusTimeoutEvent) -> str: + await asyncio.sleep(0.05) + return 'slow' + + bus_b.on(MultiBusTimeoutEvent, slow_target_handler) + + try: + event = MultiBusTimeoutEvent() + bus_a.dispatch(event) + bus_b.dispatch(event) + await 
bus_b.wait_until_idle() + + bus_b_result = next((r for r in event.event_results.values() if r.eventbus_name == bus_b.name), None) + assert bus_b_result is not None + assert bus_b_result.status == 'error' + assert isinstance(bus_b_result.error, TimeoutError) + assert event.event_path == ['MultiTimeoutA', 'MultiTimeoutB'] + finally: + await bus_a.stop(clear=True, timeout=0) + await bus_b.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_followup_event_runs_after_parent_timeout_in_queue_jump_path(): + """ + Regression guard: timeout in a handler that awaited a child event should not + stall subsequent events on the same bus. + """ + bus = EventBus(name='TimeoutQueueJumpFollowupBus') + + class ParentEvent(BaseEvent[str]): + event_timeout: float | None = 0.02 + + class ChildEvent(BaseEvent[str]): + event_timeout: float | None = 0.2 + + class TailEvent(BaseEvent[str]): + event_timeout: float | None = 0.2 + + tail_runs = 0 + + async def child_handler(event: ChildEvent) -> str: + await asyncio.sleep(0.001) + return 'child_done' + + async def parent_handler(event: ParentEvent) -> str: + child = bus.dispatch(ChildEvent()) + await child + await asyncio.sleep(0.05) # Exceeds parent timeout + return 'parent_done' + + async def tail_handler(event: TailEvent) -> str: + nonlocal tail_runs + tail_runs += 1 + return 'tail_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, child_handler) + bus.on(TailEvent, tail_handler) + + try: + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + parent_result = next(iter(parent.event_results.values())) + assert parent_result.status == 'error' + assert isinstance(parent_result.error, TimeoutError) + + tail = bus.dispatch(TailEvent()) + completed_tail = await asyncio.wait_for(tail, timeout=1.0) + assert completed_tail.event_status == 'completed' + assert tail_runs == 1 + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def 
test_forwarded_timeout_path_does_not_stall_followup_events(): + """ + Regression guard: if a forwarded awaited child times out, subsequent events + should still run on both source and target buses. + """ + bus_a = EventBus(name='TimeoutForwardA') + bus_b = EventBus(name='TimeoutForwardB') + + class ParentEvent(BaseEvent[str]): + event_timeout: float | None = 0.02 + + class ChildEvent(BaseEvent[str]): + event_timeout: float | None = 0.01 + + class TailEvent(BaseEvent[str]): + event_timeout: float | None = 0.2 + + bus_a_tail_runs = 0 + bus_b_tail_runs = 0 + child_ref: ChildEvent | None = None + + async def parent_handler(event: ParentEvent) -> str: + nonlocal child_ref + child = bus_a.dispatch(ChildEvent()) + child_ref = child + await child + return 'parent_done' + + async def slow_child_handler(event: ChildEvent) -> str: + await asyncio.sleep(0.05) # Guaranteed timeout on child. + return 'child_done' + + async def tail_handler_a(event: TailEvent) -> str: + nonlocal bus_a_tail_runs + bus_a_tail_runs += 1 + return 'tail_a' + + async def tail_handler_b(event: TailEvent) -> str: + nonlocal bus_b_tail_runs + bus_b_tail_runs += 1 + return 'tail_b' + + bus_a.on(ParentEvent, parent_handler) + bus_a.on(TailEvent, tail_handler_a) + bus_a.on('*', bus_b.dispatch) + bus_b.on(ChildEvent, slow_child_handler) + bus_b.on(TailEvent, tail_handler_b) + + try: + parent = await bus_a.dispatch(ParentEvent()) + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + + parent_result = next(iter(parent.event_results.values())) + assert parent_result.status == 'completed' + + assert child_ref is not None + assert any( + isinstance(result.error, TimeoutError) for result in child_ref.event_results.values() + ), child_ref.event_results + + # Lock/queue state should remain healthy after timeout. 
+ tail = bus_a.dispatch(TailEvent()) + completed_tail = await asyncio.wait_for(tail, timeout=1.0) + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + + assert completed_tail.event_status == 'completed' + assert bus_a_tail_runs == 1 + assert bus_b_tail_runs == 1 + finally: + await bus_a.stop(clear=True, timeout=0) + await bus_b.stop(clear=True, timeout=0) diff --git a/tests/test_name_conflict_gc.py b/tests/test_name_conflict_gc.py index 0e42655..0f1448f 100644 --- a/tests/test_name_conflict_gc.py +++ b/tests/test_name_conflict_gc.py @@ -6,11 +6,12 @@ name conflicts when creating new instances with the same name. """ +import asyncio import weakref import pytest -from bubus import EventBus +from bubus import BaseEvent, EventBus class TestNameConflictGC: @@ -19,94 +20,110 @@ class TestNameConflictGC: def test_name_conflict_with_live_reference(self): """Test that name conflict generates a warning and auto-generates a unique name""" # Create an EventBus with a specific name - bus1 = EventBus(name='TestBus') + bus1 = EventBus(name='GCTestConflict') # Try to create another with the same name - should warn and auto-generate unique name - with pytest.warns(UserWarning, match='EventBus with name "TestBus" already exists'): - bus2 = EventBus(name='TestBus') + with pytest.warns(UserWarning, match='EventBus with name "GCTestConflict" already exists'): + bus2 = EventBus(name='GCTestConflict') # The second bus should have a unique name - assert bus2.name.startswith('TestBus_') - assert bus2.name != 'TestBus' - assert len(bus2.name) == len('TestBus_') + 8 # Original name + underscore + 8 char suffix + assert bus2.name.startswith('GCTestConflict_') + assert bus2.name != 'GCTestConflict' + assert len(bus2.name) == len('GCTestConflict_') + 8 # Original name + underscore + 8 char suffix def test_name_no_conflict_after_deletion(self): - """Test that name conflict is NOT raised after the existing bus is deleted""" + """Test that name conflict is NOT raised after the 
existing bus is deleted and GC runs""" + import gc + # Create an EventBus with a specific name - bus1 = EventBus(name='TestBus') + bus1 = EventBus(name='GCTestBus1') - # Delete the reference + # Delete the reference and force GC del bus1 + gc.collect() # Force garbage collection to release the WeakSet reference - # Creating another with the same name should work since the first one has no references - bus2 = EventBus(name='TestBus') - assert bus2.name == 'TestBus' + # Creating another with the same name should work since the first one was collected + bus2 = EventBus(name='GCTestBus1') + assert bus2.name == 'GCTestBus1' def test_name_no_conflict_with_no_reference(self): """Test that name conflict is NOT raised when the existing bus was never assigned""" + import gc + # Create an EventBus with a specific name but don't keep a reference - EventBus(name='TestBus') # No assignment, will be garbage collected + EventBus(name='GCTestBus2') # No assignment, will be garbage collected + gc.collect() # Force garbage collection # Creating another with the same name should work since the first one is gone - bus2 = EventBus(name='TestBus') - assert bus2.name == 'TestBus' + bus2 = EventBus(name='GCTestBus2') + assert bus2.name == 'GCTestBus2' def test_name_conflict_with_weak_reference_only(self): """Test that name conflict is NOT raised when only weak references exist""" + import gc + # Create an EventBus and keep only a weak reference - bus1 = EventBus(name='TestBus') + bus1 = EventBus(name='GCTestBus3') weak_ref = weakref.ref(bus1) # Verify the weak reference works assert weak_ref() is bus1 - # Delete the strong reference + # Delete the strong reference and force GC del bus1 + gc.collect() # Force garbage collection # At this point, only the weak reference exists (and the WeakSet reference) # Creating another with the same name should work - bus2 = EventBus(name='TestBus') - assert bus2.name == 'TestBus' + bus2 = EventBus(name='GCTestBus3') + assert bus2.name == 'GCTestBus3' # 
The weak reference should now return None assert weak_ref() is None def test_multiple_buses_with_gc(self): """Test multiple EventBus instances with some being garbage collected""" + import gc + # Create multiple buses, some with strong refs, some without - bus1 = EventBus(name='Bus1') - EventBus(name='Bus2') # Will be GC'd - bus3 = EventBus(name='Bus3') - EventBus(name='Bus4') # Will be GC'd + bus1 = EventBus(name='GCMulti1') + EventBus(name='GCMulti2') # Will be GC'd + bus3 = EventBus(name='GCMulti3') + EventBus(name='GCMulti4') # Will be GC'd + + gc.collect() # Force garbage collection # Should be able to create new buses with the names of GC'd buses - bus2_new = EventBus(name='Bus2') - bus4_new = EventBus(name='Bus4') + bus2_new = EventBus(name='GCMulti2') + bus4_new = EventBus(name='GCMulti4') # But not with names of buses that still exist - they get auto-generated names - with pytest.warns(UserWarning, match='EventBus with name "Bus1" already exists'): - bus1_conflict = EventBus(name='Bus1') - assert bus1_conflict.name.startswith('Bus1_') + with pytest.warns(UserWarning, match='EventBus with name "GCMulti1" already exists'): + bus1_conflict = EventBus(name='GCMulti1') + assert bus1_conflict.name.startswith('GCMulti1_') - with pytest.warns(UserWarning, match='EventBus with name "Bus3" already exists'): - bus3_conflict = EventBus(name='Bus3') - assert bus3_conflict.name.startswith('Bus3_') + with pytest.warns(UserWarning, match='EventBus with name "GCMulti3" already exists'): + bus3_conflict = EventBus(name='GCMulti3') + assert bus3_conflict.name.startswith('GCMulti3_') @pytest.mark.asyncio async def test_name_conflict_after_stop_and_clear(self): """Test that clearing an EventBus allows reusing its name""" + import gc + # Create an EventBus - bus1 = EventBus(name='TestBus') + bus1 = EventBus(name='GCStopClear') - # Stop and clear it + # Stop and clear it (this renames the bus to _stopped_* and removes from all_instances) await bus1.stop(clear=True) - # Delete 
the reference to allow garbage collection + # Delete the reference and force GC del bus1 + gc.collect() # Now we should be able to create a new one with the same name - bus2 = EventBus(name='TestBus') - assert bus2.name == 'TestBus' + bus2 = EventBus(name='GCStopClear') + assert bus2.name == 'GCStopClear' def test_weakset_behavior(self): """Test that the WeakSet properly tracks EventBus instances""" @@ -131,17 +148,19 @@ def test_weakset_behavior(self): # WeakTest2 might still be there until the next iteration def test_eventbus_removed_from_weakset(self): - """Test that our implementation removes dead EventBus from WeakSet during conflict check""" + """Test that dead EventBus instances are removed from WeakSet after GC""" + import gc + # Create a bus that will be "dead" (no strong references) - EventBus(name='DeadBus') + EventBus(name='GCDeadBus') + gc.collect() # Force garbage collection - # When we try to create a new bus with the same name, the conflict check - # should detect the dead bus and remove it from the WeakSet - bus = EventBus(name='DeadBus') - assert bus.name == 'DeadBus' + # When we try to create a new bus with the same name, it should work + bus = EventBus(name='GCDeadBus') + assert bus.name == 'GCDeadBus' # The dead bus should have been removed from all_instances - names = [b.name for b in EventBus.all_instances if hasattr(b, 'name') and b.name == 'DeadBus'] + names = [b.name for b in EventBus.all_instances if hasattr(b, 'name') and b.name == 'GCDeadBus'] assert len(names) == 1 # Only the new one def test_concurrent_name_creation(self): @@ -156,3 +175,91 @@ def test_concurrent_name_creation(self): assert bus1.name == 'ConcurrentTest' assert bus2.name.startswith('ConcurrentTest_') assert bus2.name != bus1.name + + @pytest.mark.asyncio + async def test_unreferenced_buses_with_history_can_be_cleaned_without_instance_leak(self): + """ + Buses with populated history may outlive local scope while runloops are still active, + but they must be releasable 
via explicit cleanup without leaking all_instances. + """ + import gc + + class GcHistoryEvent(BaseEvent[str]): + pass + + baseline_instances = len(EventBus.all_instances) + refs: list[weakref.ReferenceType[EventBus]] = [] + + async def create_and_fill_bus(index: int) -> weakref.ReferenceType[EventBus]: + bus = EventBus(name=f'GCNoStopBus_{index}') + bus.on(GcHistoryEvent, lambda e: 'ok') + for _ in range(40): + await bus.dispatch(GcHistoryEvent()) + await bus.wait_until_idle() + return weakref.ref(bus) + + for i in range(30): + refs.append(await create_and_fill_bus(i)) + + # Encourage GC/finalization first (best effort without explicit stop()). + for _ in range(20): + gc.collect() + await asyncio.sleep(0.02) + + alive_buses = [ref() for ref in refs if ref() is not None] + still_live = [bus for bus in alive_buses if bus is not None] + + # Deterministically clean up anything still alive. + for bus in still_live: + await bus.stop(clear=True, timeout=0) + # Loop variable keeps a strong ref to the last bus in CPython. + if still_live: + del bus + del still_live + del alive_buses + + # Final GC and WeakSet purge. + for _ in range(10): + gc.collect() + await asyncio.sleep(0.01) + _ = list(EventBus.all_instances) + + assert all(ref() is None for ref in refs), 'all buses should be collectable after cleanup' + assert len(EventBus.all_instances) <= baseline_instances + + @pytest.mark.asyncio + async def test_unreferenced_buses_with_history_are_collected_without_stop(self): + """ + Unreferenced buses should be collectable without explicit stop(clear=True), + even after processing events and populating history. 
+ """ + import gc + + class GcImplicitEvent(BaseEvent[str]): + pass + + baseline_instances = len(EventBus.all_instances) + refs: list[weakref.ReferenceType[EventBus]] = [] + + async def create_and_fill_bus(index: int) -> weakref.ReferenceType[EventBus]: + bus = EventBus(name=f'GCImplicitNoStop_{index}') + bus.on(GcImplicitEvent, lambda e: 'ok') + for _ in range(30): + await bus.dispatch(GcImplicitEvent()) + await bus.wait_until_idle() + return weakref.ref(bus) + + for i in range(20): + refs.append(await create_and_fill_bus(i)) + + for _ in range(80): + gc.collect() + await asyncio.sleep(0.02) + if all(ref() is None for ref in refs): + break + + # Force WeakSet iteration to purge any dead refs. + _ = list(EventBus.all_instances) + + assert all(ref() is None for ref in refs), 'all unreferenced buses should be collected without stop()' + assert len(EventBus.all_instances) <= baseline_instances diff --git a/tests/test_parent_event_tracking.py b/tests/test_parent_event_tracking.py index c11090c..5fbe659 100644 --- a/tests/test_parent_event_tracking.py +++ b/tests/test_parent_event_tracking.py @@ -41,13 +41,13 @@ class TestParentEventTracking: async def test_basic_parent_tracking(self, eventbus: EventBus): """Test that child events automatically get event_parent_id""" - child_events: list[BaseEvent[Any]] = [] + event_children: list[BaseEvent[Any]] = [] async def parent_handler(event: ParentEvent) -> str: # Handler that dispatches a child event child = ChildEvent(data=f'child_of_{event.message}') eventbus.dispatch(child) - child_events.append(child) + event_children.append(child) return 'parent_handled' eventbus.on('ParentEvent', parent_handler) # type: ignore[reportUnknownArgumentType] @@ -67,8 +67,8 @@ async def parent_handler(event: ParentEvent) -> str: assert parent_handler_result is not None and parent_handler_result.result == 'parent_handled' # Verify child has event_parent_id set - assert len(child_events) == 1 - child = child_events[0] + assert len(event_children) 
== 1 + child = event_children[0] assert child.event_parent_id == parent.event_id async def test_multi_level_parent_tracking(self, eventbus: EventBus): @@ -115,14 +115,14 @@ async def grandchild_handler(event: BaseEvent[str]) -> str: async def test_multiple_children_same_parent(self, eventbus: EventBus): """Test multiple child events from same parent""" - child_events: list[BaseEvent[Any]] = [] + event_children: list[BaseEvent[Any]] = [] async def parent_handler(event: BaseEvent[str]) -> str: # Dispatch multiple children for i in range(3): child = ChildEvent(data=f'child_{i}') eventbus.dispatch(child) - child_events.append(child) + event_children.append(child) return 'spawned_children' eventbus.on('ParentEvent', parent_handler) @@ -134,8 +134,8 @@ async def parent_handler(event: BaseEvent[str]) -> str: await eventbus.wait_until_idle() # All children should have same parent - assert len(child_events) == 3 - for child in child_events: + assert len(event_children) == 3 + for child in event_children: assert child.event_parent_id == parent.event_id async def test_parallel_handlers_parent_tracking(self, eventbus: EventBus): @@ -240,13 +240,13 @@ async def bus2_handler(event: BaseEvent[str]) -> str: async def test_sync_handler_parent_tracking(self, eventbus: EventBus): """Test parent tracking works with sync handlers""" - child_events: list[BaseEvent[Any]] = [] + event_children: list[BaseEvent[Any]] = [] def sync_parent_handler(event: BaseEvent[str]) -> str: # Sync handler that dispatches child child = ChildEvent(data='from_sync') eventbus.dispatch(child) - child_events.append(child) + event_children.append(child) return 'sync_handled' eventbus.on('ParentEvent', sync_parent_handler) @@ -257,18 +257,18 @@ def sync_parent_handler(event: BaseEvent[str]) -> str: await eventbus.wait_until_idle() # Parent tracking should work even with sync handlers - assert len(child_events) == 1 - assert child_events[0].event_parent_id == parent.event_id + assert len(event_children) == 1 + 
assert event_children[0].event_parent_id == parent.event_id async def test_error_handler_parent_tracking(self, eventbus: EventBus): """Test parent tracking when handler errors occur""" - child_events: list[BaseEvent[Any]] = [] + event_children: list[BaseEvent[Any]] = [] async def failing_handler(event: BaseEvent[str]) -> str: # Dispatch child before failing child = ChildEvent(data='before_error') eventbus.dispatch(child) - child_events.append(child) + event_children.append(child) raise ValueError( 'Handler error - expected to fail - testing that parent event tracking works even when handlers error' ) @@ -277,7 +277,7 @@ async def success_handler(event: BaseEvent[str]) -> str: # This should still run child = ChildEvent(data='after_error') eventbus.dispatch(child) - child_events.append(child) + event_children.append(child) return 'success' eventbus.on('ParentEvent', failing_handler) @@ -289,8 +289,8 @@ async def success_handler(event: BaseEvent[str]) -> str: await eventbus.wait_until_idle() # Both children should have event_parent_id despite error - assert len(child_events) == 2 - for child in child_events: + assert len(event_children) == 2 + for child in event_children: assert child.event_parent_id == parent.event_id async def test_event_children_tracking(self, eventbus: EventBus): diff --git a/tests/test_stress_20k_events.py b/tests/test_stress_20k_events.py index 3a75be3..942c85d 100644 --- a/tests/test_stress_20k_events.py +++ b/tests/test_stress_20k_events.py @@ -1,5 +1,8 @@ import asyncio +import functools import gc +import inspect +import math import os import time from typing import Any @@ -8,6 +11,8 @@ import pytest from bubus import BaseEvent, EventBus +import bubus.models as models_module +import bubus.service as service_module def get_memory_usage_mb(): @@ -16,6 +21,299 @@ def get_memory_usage_mb(): return process.memory_info().rss / 1024 / 1024 +def percentile(values: list[float], q: float) -> float: + """Simple percentile helper without numpy 
dependency.""" + if not values: + return 0.0 + sorted_values = sorted(values) + pos = (len(sorted_values) - 1) * q + low = math.floor(pos) + high = math.ceil(pos) + if low == high: + return sorted_values[int(pos)] + return sorted_values[low] + (sorted_values[high] - sorted_values[low]) * (pos - low) + + +async def dispatch_and_measure( + bus: EventBus, + event_factory: callable, + total_events: int, + batch_size: int = 40, +) -> tuple[float, float, float, float, float]: + """ + Dispatch many events and return: + (throughput_events_per_sec, dispatch_p50_ms, dispatch_p95_ms, done_p50_ms, done_p95_ms) + """ + dispatch_latencies_ms: list[float] = [] + done_latencies_ms: list[float] = [] + pending: list[tuple[BaseEvent[Any], float]] = [] + + async def wait_one(item: tuple[BaseEvent[Any], float]) -> None: + event, t_dispatch_done = item + await event + done_latencies_ms.append((time.perf_counter() - t_dispatch_done) * 1000) + + start = time.perf_counter() + for _ in range(total_events): + t0 = time.perf_counter() + event = bus.dispatch(event_factory()) + dispatch_latencies_ms.append((time.perf_counter() - t0) * 1000) + pending.append((event, time.perf_counter())) + if len(pending) >= batch_size: + await asyncio.gather(*(wait_one(item) for item in pending)) + pending.clear() + + if pending: + await asyncio.gather(*(wait_one(item) for item in pending)) + await bus.wait_until_idle() + + elapsed = time.perf_counter() - start + throughput = total_events / max(elapsed, 1e-9) + return ( + throughput, + percentile(dispatch_latencies_ms, 0.50), + percentile(dispatch_latencies_ms, 0.95), + percentile(done_latencies_ms, 0.50), + percentile(done_latencies_ms, 0.95), + ) + + +async def run_mode_throughput_benchmark( + *, + parallel_handlers: bool, + total_events: int = 5_000, + batch_size: int = 50, +) -> tuple[int, float]: + """Run a basic no-op throughput benchmark for one handler mode.""" + bus = EventBus( + name=f'ThroughputFloor_{"parallel" if parallel_handlers else "serial"}', 
+ parallel_handlers=parallel_handlers, + middlewares=[], + ) + + processed = 0 + + async def handler(event: SimpleEvent) -> None: + nonlocal processed + processed += 1 + + bus.on(SimpleEvent, handler) + + pending: list[BaseEvent[Any]] = [] + start = time.time() + try: + for _ in range(total_events): + pending.append(bus.dispatch(SimpleEvent())) + if len(pending) >= batch_size: + await asyncio.gather(*pending) + pending.clear() + + if pending: + await asyncio.gather(*pending) + + await bus.wait_until_idle() + finally: + await bus.stop(timeout=0, clear=True) + + duration = time.time() - start + throughput = total_events / max(duration, 1e-9) + return processed, throughput + + +async def run_io_fanout_benchmark( + *, + parallel_handlers: bool, + total_events: int = 800, + handlers_per_event: int = 4, + sleep_seconds: float = 0.0015, + batch_size: int = 40, +) -> tuple[int, float]: + """Benchmark I/O-bound fanout to compare serial vs parallel handler mode.""" + bus = EventBus( + name=f'Fanout_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + middlewares=[], + ) + + handled = 0 + + for index in range(handlers_per_event): + async def handler(event: SimpleEvent) -> None: + nonlocal handled + await asyncio.sleep(sleep_seconds) + handled += 1 + + handler.__name__ = f'fanout_handler_{index}' + bus.on(SimpleEvent, handler) + + pending: list[BaseEvent[Any]] = [] + start = time.time() + try: + for _ in range(total_events): + pending.append(bus.dispatch(SimpleEvent())) + if len(pending) >= batch_size: + await asyncio.gather(*pending) + pending.clear() + + if pending: + await asyncio.gather(*pending) + + await bus.wait_until_idle() + finally: + await bus.stop(timeout=0, clear=True) + + duration = time.time() - start + return handled, duration + + +def throughput_floor_for_mode(parallel_handlers: bool) -> int: + """ + Conservative per-mode floor to catch severe regressions while avoiding CI flakiness. 
+ """ + if parallel_handlers: + return 500 + return 600 + + +def throughput_regression_floor( + first_run_throughput: float, + *, + min_fraction: float, + hard_floor: float, +) -> float: + """ + Scenario+mode regression threshold using same-run baseline + absolute safety floor. + """ + return max(hard_floor, first_run_throughput * min_fraction) + + +class MethodProfiler: + """Lightweight monkeypatch profiler for selected class methods.""" + + def __init__(self) -> None: + self.stats: dict[str, dict[str, float]] = {} + self._restore: list[tuple[type[Any], str, Any]] = [] + + def instrument(self, owner: type[Any], method_name: str, label: str | None = None) -> None: + original = getattr(owner, method_name) + metric_name = label or f'{owner.__name__}.{method_name}' + + if inspect.iscoroutinefunction(original): + @functools.wraps(original) + async def wrapped(*args: Any, **kwargs: Any) -> Any: + started = time.perf_counter() + try: + return await original(*args, **kwargs) + finally: + elapsed = time.perf_counter() - started + metric = self.stats.setdefault(metric_name, {'calls': 0.0, 'total_s': 0.0}) + metric['calls'] += 1.0 + metric['total_s'] += elapsed + else: + @functools.wraps(original) + def wrapped(*args: Any, **kwargs: Any) -> Any: + started = time.perf_counter() + try: + return original(*args, **kwargs) + finally: + elapsed = time.perf_counter() - started + metric = self.stats.setdefault(metric_name, {'calls': 0.0, 'total_s': 0.0}) + metric['calls'] += 1.0 + metric['total_s'] += elapsed + + self._restore.append((owner, method_name, original)) + setattr(owner, method_name, wrapped) + + def restore(self) -> None: + for owner, method_name, original in reversed(self._restore): + setattr(owner, method_name, original) + self._restore.clear() + + def top_lines(self, limit: int = 12) -> list[str]: + ranked = sorted(self.stats.items(), key=lambda item: item[1]['total_s'], reverse=True) + lines: list[str] = [] + for name, metric in ranked[:limit]: + calls = 
int(metric['calls']) + total_s = metric['total_s'] + avg_us = (total_s * 1_000_000.0) / max(calls, 1) + lines.append(f'{name}: calls={calls:,} total={total_s:.3f}s avg={avg_us:.1f}us') + return lines + + +async def run_contention_round( + *, + parallel_handlers: bool, + bus_count: int = 10, + events_per_bus: int = 120, + batch_size: int = 20, +) -> dict[str, float]: + """ + Concurrently dispatch on many buses to stress global lock contention. + """ + buses = [ + EventBus( + name=f'LockContention_{i}_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + middlewares=[], + ) + for i in range(bus_count) + ] + counters = [0 for _ in range(bus_count)] + dispatch_latencies_ms: list[float] = [] + done_latencies_ms: list[float] = [] + + for index, bus in enumerate(buses): + def make_handler(handler_index: int): + async def handler(event: SimpleEvent) -> None: + counters[handler_index] += 1 + + handler.__name__ = f'contention_handler_{handler_index}' + return handler + + bus.on(SimpleEvent, make_handler(index)) + + async def wait_batch(batch: list[tuple[BaseEvent[Any], float]]) -> None: + async def wait_one(item: tuple[BaseEvent[Any], float]) -> None: + event, dispatch_done_at = item + await event + done_latencies_ms.append((time.perf_counter() - dispatch_done_at) * 1000) + + await asyncio.gather(*(wait_one(item) for item in batch)) + + async def producer(bus: EventBus) -> None: + pending: list[tuple[BaseEvent[Any], float]] = [] + for _ in range(events_per_bus): + t0 = time.perf_counter() + event = bus.dispatch(SimpleEvent()) + dispatch_latencies_ms.append((time.perf_counter() - t0) * 1000) + pending.append((event, time.perf_counter())) + if len(pending) >= batch_size: + await wait_batch(pending) + pending.clear() + + if pending: + await wait_batch(pending) + await bus.wait_until_idle() + + total_events = bus_count * events_per_bus + start = time.perf_counter() + try: + await asyncio.gather(*(producer(bus) for bus in buses)) + finally: + 
await asyncio.gather(*(bus.stop(timeout=0, clear=True) for bus in buses)) + + duration = time.perf_counter() - start + return { + 'throughput': total_events / max(duration, 1e-9), + 'dispatch_p50_ms': percentile(dispatch_latencies_ms, 0.50), + 'dispatch_p95_ms': percentile(dispatch_latencies_ms, 0.95), + 'done_p50_ms': percentile(done_latencies_ms, 0.50), + 'done_p95_ms': percentile(done_latencies_ms, 0.95), + 'fairness_min': float(min(counters)), + 'fairness_max': float(max(counters)), + } + + class SimpleEvent(BaseEvent): """Simple event without Generic for performance testing""" @@ -32,7 +330,7 @@ async def test_20k_events_with_memory_control(): print(f'\nInitial memory: {initial_memory:.1f} MB') # Create EventBus with proper limits (now default) - bus = EventBus(name='ManyEvents') + bus = EventBus(name='ManyEvents', middlewares=[]) print('EventBus settings:') print(f' max_history_size: {bus.max_history_size}') @@ -158,7 +456,7 @@ async def handler(event: SimpleEvent) -> None: @pytest.mark.asyncio async def test_hard_limit_enforcement(): """Test that hard limit of 100 pending events is enforced""" - bus = EventBus(name='HardLimitTest') + bus = EventBus(name='HardLimitTest', middlewares=[]) try: # Create a slow handler to keep events pending @@ -167,11 +465,11 @@ async def slow_handler(event: SimpleEvent) -> None: bus.on('SimpleEvent', slow_handler) - # Try to dispatch more than 100 events + # Try to dispatch more than the pending limit events_dispatched = 0 errors = 0 - for _ in range(150): + for _ in range(200): try: bus.dispatch(SimpleEvent()) events_dispatched += 1 @@ -185,7 +483,8 @@ async def slow_handler(event: SimpleEvent) -> None: print(f'Hit capacity error {errors} times') # Should hit the limit - assert events_dispatched <= 100 + assert bus.max_history_size is not None + assert events_dispatched <= bus.max_history_size assert errors > 0 finally: @@ -196,7 +495,7 @@ async def slow_handler(event: SimpleEvent) -> None: @pytest.mark.asyncio async def 
test_cleanup_prioritizes_pending(): """Test that cleanup keeps pending events and removes completed ones""" - bus = EventBus(name='CleanupTest', max_history_size=10) + bus = EventBus(name='CleanupTest', max_history_size=10, middlewares=[]) try: # Process some events to completion @@ -234,9 +533,861 @@ async def slow_handler(event: BaseEvent) -> None: # Should have removed completed events to make room for pending assert bus.max_history_size is not None - assert len(bus.event_history) <= bus.max_history_size + assert len(bus.event_history) <= bus.max_history_size * 1.2 # allow for some overhead to avoid frequent gc pausing assert history_types.get('pending', 0) + history_types.get('started', 0) >= 5 finally: # Properly stop the bus to clean up pending tasks await bus.stop(timeout=0, clear=True) # Don't wait, just force cleanup + + +@pytest.mark.asyncio +async def test_ephemeral_buses_with_forwarding_churn(): + """ + Closest Python equivalent to request-scoped bus churn: + create short-lived buses, forward between them, process events, then clear. 
+ """ + total_bus_pairs = 60 + events_per_pair = 20 + total_events = total_bus_pairs * events_per_pair + initial_instances = len(EventBus.all_instances) + + handled_a = 0 + handled_b = 0 + + start = time.time() + + for idx in range(total_bus_pairs): + bus_a = EventBus(name=f'EphemeralA_{idx}_{os.getpid()}', middlewares=[]) + bus_b = EventBus(name=f'EphemeralB_{idx}_{os.getpid()}', middlewares=[]) + + async def handler_a(event: SimpleEvent) -> None: + nonlocal handled_a + handled_a += 1 + + async def handler_b(event: SimpleEvent) -> None: + nonlocal handled_b + handled_b += 1 + + bus_a.on(SimpleEvent, handler_a) + bus_b.on(SimpleEvent, handler_b) + bus_a.on('*', bus_b.dispatch) + + try: + pending = [bus_a.dispatch(SimpleEvent()) for _ in range(events_per_pair)] + await asyncio.gather(*pending) + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + + assert bus_a.max_history_size is None or len(bus_a.event_history) <= bus_a.max_history_size + assert bus_b.max_history_size is None or len(bus_b.event_history) <= bus_b.max_history_size + finally: + await bus_a.stop(timeout=0, clear=True) + await bus_b.stop(timeout=0, clear=True) + + duration = time.time() - start + gc.collect() + + assert handled_a == total_events + assert handled_b == total_events + assert len(EventBus.all_instances) <= initial_instances + assert duration < 60, f'Ephemeral bus churn took too long: {duration:.2f}s' + + +@pytest.mark.asyncio +async def test_forwarding_queue_jump_timeout_mix_stays_stable(): + """ + Stress a mixed path in Python: + parent handler awaits forwarded child events, with intermittent child timeouts. 
+ """ + class MixedParentEvent(BaseEvent): + iteration: int = 0 + event_timeout: float | None = 0.2 + + class MixedChildEvent(BaseEvent): + iteration: int = 0 + event_timeout: float | None = 0.05 + + history_limit = 500 + total_iterations = 300 + + bus_a = EventBus(name='MixedPathA', max_history_size=history_limit, middlewares=[]) + bus_b = EventBus(name='MixedPathB', max_history_size=history_limit, middlewares=[]) + + parent_handled = 0 + child_handled = 0 + child_events: list[MixedChildEvent] = [] + + async def child_handler(event: MixedChildEvent) -> str: + nonlocal child_handled + child_handled += 1 + if event.iteration % 7 == 0: + await asyncio.sleep(0.01) + else: + await asyncio.sleep(0.0005) + return 'child_done' + + async def parent_handler(event: MixedParentEvent) -> str: + nonlocal parent_handled + parent_handled += 1 + + child_timeout = 0.001 if event.iteration % 7 == 0 else 0.05 + child = bus_a.dispatch(MixedChildEvent(iteration=event.iteration, event_timeout=child_timeout)) + bus_b.dispatch(child) + child_events.append(child) + await child + return 'parent_done' + + bus_a.on(MixedParentEvent, parent_handler) + bus_b.on(MixedChildEvent, child_handler) + + start = time.time() + try: + for i in range(total_iterations): + await bus_a.dispatch(MixedParentEvent(iteration=i)) + + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + finally: + await bus_a.stop(timeout=0, clear=True) + await bus_b.stop(timeout=0, clear=True) + + duration = time.time() - start + + assert parent_handled == total_iterations + assert child_handled == total_iterations + timeout_count = sum( + 1 + for child in child_events + if any(isinstance(result.error, TimeoutError) for result in child.event_results.values()) + ) + assert timeout_count > 0 + assert len(bus_a.event_history) <= history_limit + assert len(bus_b.event_history) <= history_limit + assert duration < 60, f'Mixed forwarding/queue-jump/timeout path took too long: {duration:.2f}s' + + +@pytest.mark.asyncio +async 
def test_history_bound_is_strict_after_idle(): + """After steady-state processing, history should stay within max_history_size.""" + bus = EventBus(name='StrictHistoryBound', max_history_size=25, middlewares=[]) + + async def handler(event: SimpleEvent) -> None: + return None + + bus.on(SimpleEvent, handler) + + try: + for _ in range(200): + await bus.dispatch(SimpleEvent()) + + await bus.wait_until_idle() + assert len(bus.event_history) <= 25 + finally: + await bus.stop(timeout=0, clear=True) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_basic_throughput_floor_regression_guard(parallel_handlers: bool): + """ + Throughput regression guard across Python's handler concurrency modes. + Keeps threshold conservative to avoid CI flakiness while still catching + severe slowdowns. + """ + processed, rate = await run_mode_throughput_benchmark(parallel_handlers=parallel_handlers) + + assert processed == 5_000 + minimum_rate = throughput_floor_for_mode(parallel_handlers) + mode = 'parallel' if parallel_handlers else 'serial' + assert rate >= minimum_rate, ( + f'{mode} throughput regression: {rate:.0f} events/sec ' + f'(expected >= {minimum_rate} events/sec)' + ) + + +@pytest.mark.asyncio +async def test_parallel_handlers_mode_improves_io_bound_fanout(): + """ + For I/O-bound workloads with multiple handlers per event, parallel mode should + provide a meaningful speedup versus serial mode. 
+ """ + serial_handled, serial_duration = await run_io_fanout_benchmark(parallel_handlers=False) + parallel_handled, parallel_duration = await run_io_fanout_benchmark(parallel_handlers=True) + + expected_total = 800 * 4 + assert serial_handled == expected_total + assert parallel_handled == expected_total + assert parallel_duration < serial_duration * 0.8, ( + f'Expected parallel handler mode to be faster for I/O fanout; ' + f'serial={serial_duration:.2f}s parallel={parallel_duration:.2f}s' + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_forwarding_throughput_floor_across_modes(parallel_handlers: bool): + """ + Regression guard for forwarding path in both handler execution modes. + """ + source_bus = EventBus( + name=f'ForwardSource_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + middlewares=[], + ) + target_bus = EventBus( + name=f'ForwardTarget_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + middlewares=[], + ) + + handled = 0 + + async def sink_handler(event: SimpleEvent) -> None: + nonlocal handled + handled += 1 + + source_bus.on('*', target_bus.dispatch) + target_bus.on(SimpleEvent, sink_handler) + + total_events = 3_000 + pending: list[BaseEvent[Any]] = [] + batch_size = 40 + start = time.time() + try: + for _ in range(total_events): + pending.append(source_bus.dispatch(SimpleEvent())) + if len(pending) >= batch_size: + await asyncio.gather(*pending) + pending.clear() + + if pending: + await asyncio.gather(*pending) + await source_bus.wait_until_idle() + await target_bus.wait_until_idle() + finally: + await source_bus.stop(timeout=0, clear=True) + await target_bus.stop(timeout=0, clear=True) + + duration = time.time() - start + throughput = total_events / max(duration, 1e-9) + floor = 200 + + assert handled == total_events + mode = 'parallel' if 
parallel_handlers else 'serial' + assert throughput >= floor, ( + f'{mode} forwarding throughput regression: {throughput:.0f} events/sec ' + f'(expected >= {floor} events/sec)' + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_global_lock_contention_multi_bus_matrix(parallel_handlers: bool): + """ + High-contention benchmark: many buses dispatching concurrently under global lock. + """ + phase1 = await run_contention_round(parallel_handlers=parallel_handlers) + phase2 = await run_contention_round(parallel_handlers=parallel_handlers) + + expected_per_bus = 120.0 + hard_floor = 120.0 + regression_floor = throughput_regression_floor( + phase1['throughput'], + min_fraction=0.55, + hard_floor=90.0, + ) + + assert phase1['fairness_min'] == expected_per_bus + assert phase1['fairness_max'] == expected_per_bus + assert phase2['fairness_min'] == expected_per_bus + assert phase2['fairness_max'] == expected_per_bus + assert phase1['throughput'] >= hard_floor, ( + f'lock-contention throughput too low: {phase1["throughput"]:.0f} events/sec ' + f'(expected >= {hard_floor:.0f})' + ) + assert phase2['throughput'] >= regression_floor, ( + f'lock-contention regression: phase1={phase1["throughput"]:.0f} ' + f'phase2={phase2["throughput"]:.0f} ' + f'(required >= {regression_floor:.0f})' + ) + assert phase2['dispatch_p95_ms'] < 25.0 + assert phase2['done_p95_ms'] < 250.0 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'handlers_per_event', + [10, 30], + ids=['fanout_10_handlers', 'fanout_30_handlers'], +) +async def test_parallel_handlers_mode_scales_with_high_fanout(handlers_per_event: int): + """ + High fanout benchmark to catch regressions in parallel handler scheduling. 
+ """ + serial_handled, serial_duration = await run_io_fanout_benchmark( + parallel_handlers=False, + total_events=400, + handlers_per_event=handlers_per_event, + sleep_seconds=0.001, + batch_size=25, + ) + parallel_handled, parallel_duration = await run_io_fanout_benchmark( + parallel_handlers=True, + total_events=400, + handlers_per_event=handlers_per_event, + sleep_seconds=0.001, + batch_size=25, + ) + + expected_total = 400 * handlers_per_event + speedup = serial_duration / max(parallel_duration, 1e-9) + minimum_speedup = 1.2 if handlers_per_event == 10 else 1.5 + + assert serial_handled == expected_total + assert parallel_handled == expected_total + assert speedup >= minimum_speedup, ( + f'Parallel fanout speedup too small for {handlers_per_event} handlers/event: ' + f'{speedup:.2f}x (expected >= {minimum_speedup:.2f}x)' + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_queue_jump_perf_matrix_by_mode(parallel_handlers: bool): + """ + Queue-jump throughput/latency matrix (parent awaits child on same bus) by mode. 
+ """ + class QueueJumpParentEvent(BaseEvent): + iteration: int = 0 + event_timeout: float | None = 0.2 + + class QueueJumpChildEvent(BaseEvent): + iteration: int = 0 + event_timeout: float | None = 0.2 + + bus = EventBus( + name=f'QueueJump_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + middlewares=[], + ) + + parent_count = 0 + child_count = 0 + phase_counter = 0 + + async def child_handler(event: QueueJumpChildEvent) -> None: + nonlocal child_count + child_count += 1 + await asyncio.sleep(0.0005) + + async def parent_handler(event: QueueJumpParentEvent) -> None: + nonlocal parent_count + parent_count += 1 + child = bus.dispatch(QueueJumpChildEvent(iteration=event.iteration)) + await child + + bus.on(QueueJumpParentEvent, parent_handler) + bus.on(QueueJumpChildEvent, child_handler) + + def parent_factory() -> QueueJumpParentEvent: + nonlocal phase_counter + event = QueueJumpParentEvent(iteration=phase_counter) + phase_counter += 1 + return event + + try: + phase1 = await dispatch_and_measure(bus, parent_factory, total_events=500, batch_size=20) + phase2 = await dispatch_and_measure(bus, parent_factory, total_events=500, batch_size=20) + finally: + await bus.stop(timeout=0, clear=True) + + hard_floor = 60.0 + regression_floor = throughput_regression_floor(phase1[0], min_fraction=0.50, hard_floor=50.0) + + assert parent_count == 1_000 + assert child_count == 1_000 + assert phase1[0] >= hard_floor, ( + f'queue-jump throughput too low: {phase1[0]:.0f} events/sec (expected >= {hard_floor:.0f})' + ) + assert phase2[0] >= regression_floor, ( + f'queue-jump regression: phase1={phase1[0]:.0f} phase2={phase2[0]:.0f} ' + f'(required >= {regression_floor:.0f})' + ) + assert phase2[2] < 15.0 + assert phase2[4] < 120.0 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_forwarding_chain_perf_matrix_by_mode(parallel_handlers: 
bool): + """ + Forwarding chain A -> B -> C throughput/latency matrix by mode. + """ + source_bus = EventBus( + name=f'ChainSource_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=120, + middlewares=[], + ) + middle_bus = EventBus( + name=f'ChainMiddle_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=120, + middlewares=[], + ) + sink_bus = EventBus( + name=f'ChainSink_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=120, + middlewares=[], + ) + + sink_count = 0 + + async def sink_handler(event: SimpleEvent) -> None: + nonlocal sink_count + sink_count += 1 + + async def forward_to_middle(event: BaseEvent[Any]) -> None: + while True: + try: + middle_bus.dispatch(event) + return + except asyncio.QueueFull: + await asyncio.sleep(0) + except RuntimeError as exc: + if 'EventBus at capacity' not in str(exc): + raise + await asyncio.sleep(0) + + async def forward_to_sink(event: BaseEvent[Any]) -> None: + while True: + try: + sink_bus.dispatch(event) + return + except asyncio.QueueFull: + await asyncio.sleep(0) + except RuntimeError as exc: + if 'EventBus at capacity' not in str(exc): + raise + await asyncio.sleep(0) + + source_bus.on('*', forward_to_middle) + middle_bus.on('*', forward_to_sink) + sink_bus.on(SimpleEvent, sink_handler) + + try: + phase1 = await dispatch_and_measure(source_bus, SimpleEvent, total_events=500, batch_size=5) + phase2 = await dispatch_and_measure(source_bus, SimpleEvent, total_events=500, batch_size=5) + await source_bus.wait_until_idle() + await middle_bus.wait_until_idle() + await sink_bus.wait_until_idle() + finally: + await source_bus.stop(timeout=0, clear=True) + await middle_bus.stop(timeout=0, clear=True) + await sink_bus.stop(timeout=0, clear=True) + + hard_floor = 35.0 + regression_floor = throughput_regression_floor(phase1[0], min_fraction=0.45, hard_floor=20.0) 
+ + assert sink_count == 1_000 + assert phase1[0] >= hard_floor + assert phase2[0] >= regression_floor + assert phase2[2] < 40.0 + assert phase2[4] < 350.0 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_timeout_churn_perf_matrix_by_mode(parallel_handlers: bool): + """ + Timeout-heavy phase followed by healthy phase should keep throughput healthy. + """ + class TimeoutChurnEvent(BaseEvent): + mode: str = 'slow' + iteration: int = 0 + event_timeout: float | None = 0.01 + + bus = EventBus( + name=f'TimeoutChurn_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + middlewares=[], + ) + + timeout_phase_events: list[TimeoutChurnEvent] = [] + recovery_phase_events: list[TimeoutChurnEvent] = [] + timeout_counter = 0 + recovery_counter = 0 + + async def handler(event: TimeoutChurnEvent) -> None: + if event.mode == 'slow': + await asyncio.sleep(0.006) + else: + await asyncio.sleep(0) + + bus.on(TimeoutChurnEvent, handler) + + def timeout_factory() -> TimeoutChurnEvent: + nonlocal timeout_counter + is_slow = (timeout_counter % 3) != 0 + event = TimeoutChurnEvent( + mode='slow' if is_slow else 'fast', + iteration=timeout_counter, + event_timeout=0.001 if is_slow else 0.02, + ) + timeout_phase_events.append(event) + timeout_counter += 1 + return event + + def recovery_factory() -> TimeoutChurnEvent: + nonlocal recovery_counter + event = TimeoutChurnEvent( + mode='fast', + iteration=10_000 + recovery_counter, + event_timeout=0.02, + ) + recovery_phase_events.append(event) + recovery_counter += 1 + return event + + try: + timeout_phase = await dispatch_and_measure(bus, timeout_factory, total_events=180, batch_size=20) + recovery_phase = await dispatch_and_measure(bus, recovery_factory, total_events=500, batch_size=25) + finally: + await bus.stop(timeout=0, clear=True) + + timeout_count = sum( + 1 + for event in timeout_phase_events 
+ if event.mode == 'slow' + and any(isinstance(result.error, TimeoutError) for result in event.event_results.values()) + ) + recovery_errors = sum( + 1 + for event in recovery_phase_events + if any(result.error is not None for result in event.event_results.values()) + ) + hard_floor = 120.0 + regression_floor = throughput_regression_floor( + timeout_phase[0], + min_fraction=0.45, + hard_floor=100.0, + ) + + assert timeout_count > 0 + assert recovery_errors == 0 + assert recovery_phase[0] >= hard_floor + assert recovery_phase[0] >= regression_floor + assert recovery_phase[2] < 12.0 + assert recovery_phase[4] < 70.0 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_memory_envelope_by_mode_for_capped_history(parallel_handlers: bool): + """ + Mode-specific memory slope/envelope check with capped history. + """ + bus = EventBus( + name=f'MemoryEnvelope_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=60, + middlewares=[], + ) + + async def handler(event: SimpleEvent) -> None: + return None + + bus.on(SimpleEvent, handler) + + gc.collect() + before_mb = get_memory_usage_mb() + + try: + metrics = await dispatch_and_measure(bus, SimpleEvent, total_events=6_000, batch_size=40) + done_mb = get_memory_usage_mb() + gc.collect() + gc_mb = get_memory_usage_mb() + retained = len(bus.event_history) + finally: + await bus.stop(timeout=0, clear=True) + + done_delta = done_mb - before_mb + gc_delta = gc_mb - before_mb + per_dispatched_kb = (max(done_delta, 0.0) * 1024.0) / 6_000 + per_retained_mb = max(gc_delta, 0.0) / max(retained, 1) + done_budget = 130.0 if parallel_handlers else 110.0 + gc_budget = 70.0 if parallel_handlers else 60.0 + + assert retained <= 60 + assert metrics[0] >= 450.0 + assert metrics[2] < 10.0 + assert metrics[4] < 60.0 + assert done_delta < done_budget + assert gc_delta < gc_budget + assert 
per_dispatched_kb < 32.0 + assert per_retained_mb < 1.5 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_max_history_none_single_bus_stress_matrix(parallel_handlers: bool): + """ + Unlimited-history mode stress for single bus: throughput + memory envelope. + """ + bus = EventBus( + name=f'UnlimitedSingle_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=None, + middlewares=[], + ) + processed = 0 + + async def handler(event: SimpleEvent) -> None: + nonlocal processed + processed += 1 + + bus.on(SimpleEvent, handler) + + gc.collect() + before_mb = get_memory_usage_mb() + try: + phase1 = await dispatch_and_measure(bus, SimpleEvent, total_events=1_500, batch_size=120) + phase2 = await dispatch_and_measure(bus, SimpleEvent, total_events=1_500, batch_size=120) + done_mb = get_memory_usage_mb() + gc.collect() + gc_mb = get_memory_usage_mb() + history_size = len(bus.event_history) + finally: + await bus.stop(timeout=0, clear=True) + + done_delta = done_mb - before_mb + gc_delta = gc_mb - before_mb + per_event_mb = max(gc_delta, 0.0) / 3_000 + hard_floor = 220.0 + regression_floor = throughput_regression_floor(phase1[0], min_fraction=0.55, hard_floor=170.0) + + assert processed == 3_000 + assert history_size == 3_000 + assert phase1[0] >= hard_floor + assert phase2[0] >= regression_floor + assert phase2[2] < 12.0 + assert phase2[4] < 80.0 + assert done_delta < 260.0 + assert gc_delta < 220.0 + assert per_event_mb < 0.08 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_max_history_none_forwarding_chain_stress_matrix(parallel_handlers: bool): + """ + Unlimited-history forwarding chain (A -> B -> C) stress by mode. 
+ """ + source_bus = EventBus( + name=f'UnlimitedChainSource_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=None, + middlewares=[], + ) + middle_bus = EventBus( + name=f'UnlimitedChainMiddle_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=None, + middlewares=[], + ) + sink_bus = EventBus( + name=f'UnlimitedChainSink_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=None, + middlewares=[], + ) + + sink_count = 0 + + async def sink_handler(event: SimpleEvent) -> None: + nonlocal sink_count + sink_count += 1 + + source_bus.on('*', middle_bus.dispatch) + middle_bus.on('*', sink_bus.dispatch) + sink_bus.on(SimpleEvent, sink_handler) + + gc.collect() + before_mb = get_memory_usage_mb() + try: + phase1 = await dispatch_and_measure(source_bus, SimpleEvent, total_events=900, batch_size=100) + phase2 = await dispatch_and_measure(source_bus, SimpleEvent, total_events=900, batch_size=100) + done_mb = get_memory_usage_mb() + gc.collect() + gc_mb = get_memory_usage_mb() + source_hist = len(source_bus.event_history) + middle_hist = len(middle_bus.event_history) + sink_hist = len(sink_bus.event_history) + finally: + await source_bus.stop(timeout=0, clear=True) + await middle_bus.stop(timeout=0, clear=True) + await sink_bus.stop(timeout=0, clear=True) + + gc_delta = gc_mb - before_mb + done_delta = done_mb - before_mb + hard_floor = 170.0 + regression_floor = throughput_regression_floor(phase1[0], min_fraction=0.55, hard_floor=130.0) + + assert sink_count == 1_800 + assert source_hist == 1_800 + assert middle_hist == 1_800 + assert sink_hist == 1_800 + assert phase1[0] >= hard_floor + assert phase2[0] >= regression_floor + assert phase2[2] < 15.0 + assert phase2[4] < 100.0 + assert done_delta < 320.0 + assert gc_delta < 280.0 + + +@pytest.mark.asyncio +@pytest.mark.skipif( + os.getenv('BUBUS_PERF_DEBUG') 
!= '1', + reason='Set BUBUS_PERF_DEBUG=1 to enable hot-path timing diagnostics', +) +async def test_perf_debug_hot_path_breakdown() -> None: + """ + Debug-only perf test: + profiles key hot-path methods to confirm where time is spent before optimizing. + """ + profiler = MethodProfiler() + instrumented = [ + (service_module.ReentrantLock, '__aenter__'), + (service_module.ReentrantLock, '__aexit__'), + (service_module.EventBus, '_get_applicable_handlers'), + (service_module.EventBus, '_would_create_loop'), + (service_module.EventBus, '_execute_handlers'), + (service_module.EventBus, 'execute_handler'), + (service_module.EventBus, 'cleanup_event_history'), + (models_module.BaseEvent, 'event_create_pending_results'), + (models_module.BaseEvent, '_is_queued_on_any_bus'), + (models_module.BaseEvent, '_remove_self_from_queue'), + (models_module.BaseEvent, '_process_self_on_all_buses'), + ] + for owner, method_name in instrumented: + profiler.instrument(owner, method_name) + + class DebugParentEvent(BaseEvent): + idx: int = 0 + event_timeout: float | None = 0.2 + + class DebugChildEvent(BaseEvent): + idx: int = 0 + event_timeout: float | None = 0.2 + + bus_a = EventBus(name='PerfDebugA', middlewares=[]) + bus_b = EventBus(name='PerfDebugB', middlewares=[]) + + forwarded_simple_count = 0 + child_count = 0 + parent_counter = 0 + + async def forwarded_simple_handler(event: SimpleEvent) -> None: + nonlocal forwarded_simple_count + forwarded_simple_count += 1 + + async def child_handler(event: DebugChildEvent) -> None: + nonlocal child_count + child_count += 1 + await asyncio.sleep(0) + + async def parent_handler(event: DebugParentEvent) -> None: + child = bus_a.dispatch(DebugChildEvent(idx=event.idx)) + bus_b.dispatch(child) + await child + + bus_a.on('*', bus_b.dispatch) + bus_b.on(SimpleEvent, forwarded_simple_handler) + bus_a.on(DebugParentEvent, parent_handler) + bus_b.on(DebugChildEvent, child_handler) + + def parent_factory() -> DebugParentEvent: + nonlocal 
parent_counter + event = DebugParentEvent(idx=parent_counter) + parent_counter += 1 + return event + + gc.collect() + before_mb = get_memory_usage_mb() + start = time.perf_counter() + try: + simple_metrics = await dispatch_and_measure(bus_a, SimpleEvent, total_events=2_000, batch_size=50) + parent_metrics = await dispatch_and_measure(bus_a, parent_factory, total_events=600, batch_size=20) + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + finally: + await bus_a.stop(timeout=0, clear=True) + await bus_b.stop(timeout=0, clear=True) + profiler.restore() + elapsed = time.perf_counter() - start + done_mb = get_memory_usage_mb() + gc.collect() + gc_mb = get_memory_usage_mb() + + print('\n[perf-debug] scenario=global_fifo_forwarding_queue_jump') + print(f'[perf-debug] elapsed_s={elapsed:.3f}') + print( + '[perf-debug] simple throughput={:.0f}/s dispatch_p95={:.3f}ms done_p95={:.3f}ms'.format( + simple_metrics[0], simple_metrics[2], simple_metrics[4] + ) + ) + print( + '[perf-debug] queue_jump throughput={:.0f}/s dispatch_p95={:.3f}ms done_p95={:.3f}ms'.format( + parent_metrics[0], parent_metrics[2], parent_metrics[4] + ) + ) + print( + '[perf-debug] memory_mb before={:.1f} done={:.1f} gc={:.1f}'.format( + before_mb, done_mb, gc_mb + ) + ) + print(f'[perf-debug] forwarded_simple_count={forwarded_simple_count:,} child_count={child_count:,}') + print('[perf-debug] hot_path_top_total_time:') + for line in profiler.top_lines(limit=14): + print(f'[perf-debug] {line}') + + assert forwarded_simple_count == 2_000 + assert child_count == 600 diff --git a/tests/test_typed_event_results.py b/tests/test_typed_event_results.py index ee9833d..8613868 100644 --- a/tests/test_typed_event_results.py +++ b/tests/test_typed_event_results.py @@ -1,5 +1,8 @@ """Test typed event results with automatic casting.""" +# pyright: reportAssertTypeFailure=false +# pyright: reportUnnecessaryIsInstance=false + import asyncio from typing import Any, assert_type @@ -183,6 +186,33 @@ class 
CustomResult(BaseModel): class SpecificEvent(BaseEvent[CustomResult]): request_id: str = 'test123' + # Validate inline isinstance usage works with await expect() + async def dispatch_inline_isinstance(): + await asyncio.sleep(0.01) + bus.dispatch(SpecificEvent(request_id='inline-isinstance')) + + inline_isinstance_task = asyncio.create_task(dispatch_inline_isinstance()) + assert isinstance(await bus.expect(SpecificEvent, timeout=1.0), SpecificEvent) + await inline_isinstance_task + + # Validate inline assert_type usage works with await expect() + async def dispatch_inline_assert_type(): + await asyncio.sleep(0.01) + bus.dispatch(SpecificEvent(request_id='inline-assert-type')) + + inline_type_task = asyncio.create_task(dispatch_inline_assert_type()) + assert_type(await bus.expect(SpecificEvent, timeout=1.0), SpecificEvent) + await inline_type_task + + # Validate assert_type with isinstance expression + async def dispatch_inline_isinstance_type(): + await asyncio.sleep(0.01) + bus.dispatch(SpecificEvent(request_id='inline-isinstance-type')) + + inline_isinstance_type_task = asyncio.create_task(dispatch_inline_isinstance_type()) + assert_type(isinstance(await bus.expect(SpecificEvent, timeout=1.0), SpecificEvent), bool) + await inline_isinstance_type_task + # Start a task that will dispatch the event async def dispatch_later(): await asyncio.sleep(0.01) @@ -192,6 +222,8 @@ async def dispatch_later(): # Use expect with the event class - should return SpecificEvent type expected_event = await bus.expect(SpecificEvent, timeout=1.0) + assert expected_event is not None + assert isinstance(expected_event, SpecificEvent) # Type checking - this should work without cast assert_type(expected_event, SpecificEvent) # Verify type is SpecificEvent, not BaseEvent[Any] @@ -214,8 +246,10 @@ async def dispatch_multiple(): include=lambda e: e.request_id == 'correct', # type: ignore timeout=1.0, ) + assert filtered_event is not None assert_type(filtered_event, SpecificEvent) # Should 
still be SpecificEvent + assert isinstance(filtered_event, SpecificEvent) assert type(filtered_event) is SpecificEvent assert filtered_event.request_id == 'correct' @@ -226,6 +260,7 @@ async def dispatch_string_event(): dispatch_task3 = asyncio.create_task(dispatch_string_event()) string_event = await bus.expect('StringEvent', timeout=1.0) + assert string_event is not None assert_type(string_event, BaseEvent[Any]) # Should be BaseEvent[Any] assert string_event.event_type == 'StringEvent' @@ -240,6 +275,33 @@ async def dispatch_string_event(): await bus.stop(clear=True) +async def test_query_type_inference(): + """Test that EventBus.query() returns the correct typed event.""" + print('\n=== Test Query Type Inference ===') + + bus = EventBus(name='query_type_test_bus') + + class QueryEvent(BaseEvent[str]): + pass + + # Dispatch an event so it appears in history + event = bus.dispatch(QueryEvent()) + await bus.wait_until_idle() + + assert isinstance(await bus.query(QueryEvent, since=10), QueryEvent) + assert_type(await bus.query(QueryEvent, since=10), QueryEvent) + assert_type(isinstance(await bus.query(QueryEvent, since=10), QueryEvent), bool) + queried = await bus.query(QueryEvent, since=10) + + assert queried is not None + assert isinstance(queried, QueryEvent) + assert_type(queried, QueryEvent) + assert queried.event_id == event.event_id + + print(f'βœ… Query correctly preserved type: {type(queried).__name__}') + await bus.stop(clear=True) + + async def test_dispatch_type_inference(): """Test that EventBus.dispatch() returns the same type as its input.""" print('\n=== Test Dispatch Type Inference ===') @@ -257,6 +319,7 @@ class CustomEvent(BaseEvent[CustomResult]): # Dispatch should return the same type WITHOUT needing cast() dispatched_event = bus.dispatch(original_event) + assert isinstance(dispatched_event, CustomEvent) # Type checking - this should work without cast assert_type(dispatched_event, CustomEvent) # Should be CustomEvent, not BaseEvent[Any] @@ 
-271,6 +334,19 @@ async def handler(event: CustomEvent) -> CustomResult: bus.on('CustomEvent', handler) + # Validate inline isinstance usage works with dispatch() + another_event = CustomEvent() + assert isinstance(bus.dispatch(another_event), CustomEvent) + + # Validate assert_type captures dispatch() return type when called inline + type_event = CustomEvent() + dispatched_type_event = bus.dispatch(type_event) + assert_type(dispatched_type_event, CustomEvent) + + # Validate assert_type with isinstance expression using dispatch() + isinstance_type_event = CustomEvent() + assert_type(isinstance(bus.dispatch(isinstance_type_event), CustomEvent), bool) + # We should be able to use it without casting result = await dispatched_event.event_result() @@ -285,6 +361,10 @@ async def handler(event: CustomEvent) -> CustomResult: # Before: event = cast(CustomEvent, bus.dispatch(CustomEvent())) # After: event = bus.dispatch(CustomEvent()) # Type is preserved! + await another_event.event_result() + await type_event.event_result() + await isinstance_type_event.event_result() + print(f'βœ… Dispatch correctly preserved type: {type(dispatched_event).__name__}') print('βœ… No cast() needed - type inference works!') await bus.stop(clear=True) @@ -298,6 +378,7 @@ async def test_typed_event_results(): await test_no_casting_when_no_result_type() await test_result_type_stored_in_event_result() await test_expect_type_inference() + await test_query_type_inference() await test_dispatch_type_inference() print('\nπŸŽ‰ All typed event result tests passed!') diff --git a/ui/README.md b/ui/README.md new file mode 100644 index 0000000..b6b8663 --- /dev/null +++ b/ui/README.md @@ -0,0 +1,38 @@ +# bubus Monitoring Dashboard UI + +Minimal FastAPI Web UI application that reads the `events_log` and `event_results_log` tables produced by the `SQLiteHistoryMirrorMiddleware` and exposes them over HTTP/WebSocket for live monitoring by an administrator / developer. 
+
+## Quick start
+
+```bash
+git clone https://github.com/browser-use/bubus.git
+cd bubus
+uv venv
+uv pip install fastapi 'uvicorn[standard]'
+```
+
+```bash
+# generate and save a live stream of test events (creates/appends to ./events.sqlite)
+export EVENT_HISTORY_DB=./events.sqlite
+uv run python -m ui.test_events &
+```
+
+```bash
+# run the UI backend server and then open the UI in your browser
+uv run uvicorn ui.main:app --reload
+open http://localhost:8000
+```
+
+You should now see on [http://localhost:8000](http://localhost:8000) a simple dashboard that shows recent events and handler results in real-time (via WebSocket).
+
+Replace `events.sqlite` with any db matching that schema to use in other codebases.
+
+## Endpoints
+
+- `GET /events?limit=20` – latest events (JSON)
+- `GET /results?limit=20` – latest handler results (JSON)
+- `GET /meta` – database path + existence flag
+- `GET /` – minimal HTML dashboard
+- `WS /ws/events` – pushes new rows as they arrive (`{"events": [...], "results": [...]}`)
+
+This app is intentionally small so you can vibecode-extend it with additional metrics, authentication, or richer UI as needed.
diff --git a/ui/__init__.py b/ui/__init__.py
new file mode 100644
index 0000000..9bf2e16
--- /dev/null
+++ b/ui/__init__.py
@@ -0,0 +1,5 @@
+"""Minimal FastAPI app for monitoring bubus SQLite event history."""
+
+from .main import app
+
+__all__ = ['app']
diff --git a/ui/config.py b/ui/config.py
new file mode 100644
index 0000000..b4165f2
--- /dev/null
+++ b/ui/config.py
@@ -0,0 +1,19 @@
+"""Configuration helpers for the monitoring app."""
+
+import os
+from pathlib import Path
+
+DEFAULT_DB_PATH = Path(os.getenv('EVENT_HISTORY_DB', 'events.sqlite'))
+
+
+def resolve_db_path() -> Path:
+    """
+    Resolve the path to the SQLite history database.
+
+    The path can be overridden via the EVENT_HISTORY_DB environment variable.
+ """ + db_path = Path(os.getenv('EVENT_HISTORY_DB', DEFAULT_DB_PATH)) + if not db_path.is_absolute(): + # Resolve relative to repository root (parent directory of ui) + db_path = Path(__file__).resolve().parent.parent / db_path + return db_path diff --git a/ui/db.py b/ui/db.py new file mode 100644 index 0000000..ecbd84c --- /dev/null +++ b/ui/db.py @@ -0,0 +1,108 @@ +"""Async helpers for reading the SQLite event history.""" + +from __future__ import annotations + +import asyncio +import sqlite3 +from dataclasses import dataclass +from typing import Any, List + +from .config import resolve_db_path + + +def _connect() -> sqlite3.Connection: + conn = sqlite3.connect(resolve_db_path(), check_same_thread=False) + conn.row_factory = sqlite3.Row + return conn + + +async def fetch_events(limit: int = 50) -> list[dict[str, Any]]: + return await asyncio.to_thread(_fetch_events_sync, limit) + + +def _fetch_events_sync(limit: int) -> list[dict[str, Any]]: + conn = _connect() + try: + rows = conn.execute( + """ + SELECT id, event_id, event_type, event_status, eventbus_name, phase, event_json, inserted_at + FROM events_log + ORDER BY inserted_at DESC + LIMIT ? + """, + (limit,), + ).fetchall() + return [dict(row) for row in rows] + finally: + conn.close() + + +async def fetch_results(limit: int = 50) -> list[dict[str, Any]]: + return await asyncio.to_thread(_fetch_results_sync, limit) + + +def _fetch_results_sync(limit: int) -> list[dict[str, Any]]: + conn = _connect() + try: + rows = conn.execute( + """ + SELECT id, event_id, event_result_id, handler_name, status, phase, result_repr, error_repr, + eventbus_name, event_result_json, inserted_at + FROM event_results_log + ORDER BY inserted_at DESC + LIMIT ? 
+ """, + (limit,), + ).fetchall() + return [dict(row) for row in rows] + finally: + conn.close() + + +@dataclass +class HistoryStreamState: + last_event_id: int = 0 + last_result_id: int = 0 + + +async def stream_new_rows(state: HistoryStreamState) -> dict[str, List[dict[str, Any]]]: + """Return new rows added since the last call.""" + updates = await asyncio.to_thread(_stream_new_rows_sync, state) + return updates + + +def _stream_new_rows_sync(state: HistoryStreamState) -> dict[str, List[dict[str, Any]]]: + conn = _connect() + try: + events = conn.execute( + """ + SELECT id, event_id, event_type, event_status, eventbus_name, phase, event_json, inserted_at + FROM events_log + WHERE id > ? + ORDER BY id ASC + """, + (state.last_event_id,), + ).fetchall() + + results = conn.execute( + """ + SELECT id, event_id, event_result_id, handler_name, status, phase, result_repr, error_repr, + eventbus_name, event_result_json, inserted_at + FROM event_results_log + WHERE id > ? + ORDER BY id ASC + """, + (state.last_result_id,), + ).fetchall() + + if events: + state.last_event_id = events[-1]['id'] + if results: + state.last_result_id = results[-1]['id'] + + return { + 'events': [dict(row) for row in events], + 'results': [dict(row) for row in results], + } + finally: + conn.close() diff --git a/ui/main.py b/ui/main.py new file mode 100644 index 0000000..a679377 --- /dev/null +++ b/ui/main.py @@ -0,0 +1,563 @@ +from __future__ import annotations + +import asyncio +import json +from datetime import datetime +from typing import Annotated, Any + +from fastapi import FastAPI, Query, WebSocket, WebSocketDisconnect +from fastapi.responses import HTMLResponse, JSONResponse + +from . 
import db +from .config import resolve_db_path + +app = FastAPI(title='bubus event monitor', version='0.1.0') + + +def _format_timestamp(value: str | None) -> str | None: + if not value: + return None + # SQLite timestamp string -> ISO 8601 + try: + return datetime.fromisoformat(value.replace('Z', '+00:00')).isoformat() + except ValueError: + return value + + +async def _fetch_events(limit: int) -> list[dict[str, Any]]: + rows = await db.fetch_events(limit) + for row in rows: + row['inserted_at'] = _format_timestamp(row.get('inserted_at')) + return rows + + +async def _fetch_results(limit: int) -> list[dict[str, Any]]: + rows = await db.fetch_results(limit) + for row in rows: + row['inserted_at'] = _format_timestamp(row.get('inserted_at')) + return rows + + +@app.get('/', response_class=HTMLResponse) +async def index() -> str: + return """ + + + + + bubus Event Monitor + + + +
+

bubus Event Monitor

+
+ Database: + connecting… + +
+
+
+ +
+ + + +
+
+
+ + + + """ + + +@app.get('/events') +async def list_events(limit: Annotated[int, Query(ge=1, le=200)] = 20) -> JSONResponse: + rows = await _fetch_events(limit) + return JSONResponse(rows) + + +@app.get('/results') +async def list_results(limit: Annotated[int, Query(ge=1, le=200)] = 20) -> JSONResponse: + rows = await _fetch_results(limit) + return JSONResponse(rows) + + +@app.get('/meta') +async def meta() -> dict[str, Any]: + db_path = resolve_db_path() + exists = db_path.exists() + return { + 'db_path': str(db_path), + 'db_exists': exists, + } + + +@app.websocket('/ws/events') +async def websocket_events(socket: WebSocket) -> None: + await socket.accept() + state = db.HistoryStreamState() + try: + # Prime with latest IDs so we only broadcast new rows + latest_events = await _fetch_events(1) + latest_results = await _fetch_results(1) + if latest_events: + state.last_event_id = latest_events[0]['id'] + if latest_results: + state.last_result_id = latest_results[0]['id'] + + while True: + updates = await db.stream_new_rows(state) + if updates['events'] or updates['results']: + for key in ('events', 'results'): + for row in updates[key]: + row['inserted_at'] = _format_timestamp(row.get('inserted_at')) + await socket.send_text(json.dumps(updates)) + await asyncio.sleep(1.0) + except WebSocketDisconnect: + return + except Exception as exc: # pragma: no cover - surface to client + await socket.send_text(json.dumps({'error': str(exc)})) + await asyncio.sleep(0.5) diff --git a/ui/test_events.py b/ui/test_events.py new file mode 100644 index 0000000..b8225db --- /dev/null +++ b/ui/test_events.py @@ -0,0 +1,158 @@ +"""Utility script to generate synthetic events for the monitor app.""" + +from __future__ import annotations + +import argparse +import asyncio +import random +import string +from typing import Sequence + +from bubus import BaseEvent, EventBus, SQLiteHistoryMirrorMiddleware + +from .config import resolve_db_path + + +class RandomTestEvent(BaseEvent): + 
abc_payload_field: str + xyz_category_field: str + route_hint: str | None = None + + +class FollowUpEvent(BaseEvent): + abc_parent_payload_field: str + xyz_detail_field: str + depth: int + + +class AuditTrailEvent(BaseEvent): + source_event_id: str + handler_name: str + message: str + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description='Generate random events for the bubus monitor.') + parser.add_argument('--min-delay', type=float, default=0.2, help='Minimum delay between root events (seconds).') + parser.add_argument('--max-delay', type=float, default=1.0, help='Maximum delay between root events (seconds).') + parser.add_argument('--error-rate', type=float, default=0.2, help='Fraction of handlers that should raise an error.') + parser.add_argument('--child-rate', type=float, default=0.4, help='Probability of dispatching follow-up events.') + parser.add_argument('--audit-rate', type=float, default=0.5, help='Probability of emitting audit trail events.') + parser.add_argument('--max-depth', type=int, default=2, help='Maximum nested follow-up depth.') + parser.add_argument('--burst-size', type=int, default=4, help='Number of root events per burst.') + parser.add_argument('--categories', nargs='*', default=['alpha', 'beta', 'gamma'], help='Event categories to sample.') + parser.add_argument('--concurrent', type=int, default=3, help='Number of concurrent root event producers.') + parser.add_argument('--events', type=int, default=0, help='Optional count. 
0 = run forever.') + return parser.parse_args() + + +def _random_text(length: int = 8) -> str: + return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length)) + + +async def run_generator(args: argparse.Namespace) -> None: + db_path = resolve_db_path() + db_path.parent.mkdir(parents=True, exist_ok=True) + middleware = SQLiteHistoryMirrorMiddleware(db_path) + bus = EventBus(name='MonitorGenerator', middlewares=[middleware], parallel_handlers=True) + + categories: Sequence[str] = args.categories or ['default'] + + async def random_handler(event: RandomTestEvent) -> str: + await asyncio.sleep(random.uniform(0.35, 0.7)) + if random.random() < args.child_rate: + depth = random.randint(1, max(1, args.max_depth)) + await emit_followups(event, depth) + if random.random() < args.audit_rate: + bus.dispatch( + AuditTrailEvent( + source_event_id=event.event_id, + handler_name='random_handler', + message=f'Processed payload {event.abc_payload_field}', + ) + ) + if random.random() < args.error_rate: + raise RuntimeError(f'Flaky handler failed for payload={event.abc_payload_field}') + return event.abc_payload_field[::-1] + + async def analytics_handler(event: RandomTestEvent) -> None: + await asyncio.sleep(random.uniform(0.2, 0.5)) + if random.random() < args.audit_rate: + bus.dispatch( + AuditTrailEvent( + source_event_id=event.event_id, + handler_name='analytics_handler', + message=f'Category {event.xyz_category_field}', + ) + ) + + async def auditing_handler(event: RandomTestEvent) -> str: + await asyncio.sleep(random.uniform(0.25, 0.6)) + return f"route:{event.route_hint or 'default'}|category:{event.xyz_category_field}" + + async def followup_handler(event: FollowUpEvent) -> str: + await asyncio.sleep(random.uniform(0.3, 0.65)) + if random.random() < 0.3 and event.depth < args.max_depth: + await emit_followups(event, args.max_depth - event.depth) + return f'followup:{event.xyz_detail_field}' + + async def audit_handler(event: AuditTrailEvent) -> None: + 
await asyncio.sleep(random.uniform(0.2, 0.4)) + + bus.on('RandomTestEvent', random_handler) + bus.on('RandomTestEvent', analytics_handler) + bus.on('RandomTestEvent', auditing_handler) + bus.on('FollowUpEvent', followup_handler) + bus.on('AuditTrailEvent', audit_handler) + + print(f'🟒 Streaming events to {db_path}') + + async def producer_task(task_id: int) -> None: + emitted = 0 + while args.events == 0 or emitted < args.events: + burst = random.randint(1, max(1, args.burst_size)) + for _ in range(burst): + payload = _random_text(10) + event = RandomTestEvent( + abc_payload_field=payload, + xyz_category_field=random.choice(list(categories)), + route_hint=f'route-{task_id}-{random.randint(1, 3)}', + event_result_type=str, + ) + bus.dispatch(event) + emitted += 1 + if args.events and emitted >= args.events: + break + await asyncio.sleep(random.uniform(args.min_delay, args.max_delay)) + await asyncio.sleep(random.uniform(args.min_delay, args.max_delay)) + + async def emit_followups(parent_event: BaseEvent, remaining_depth: int) -> None: + depth = getattr(parent_event, 'depth', 0) + 1 + followup_count = random.randint(1, 2) + for _ in range(followup_count): + follow_up = FollowUpEvent( + abc_parent_payload_field=getattr(parent_event, 'abc_payload_field', parent_event.event_id), + xyz_detail_field=_random_text(6), + depth=depth, + event_result_type=str, + ) + bus.dispatch(follow_up) + if remaining_depth > 1 and random.random() < 0.6: + await asyncio.sleep(random.uniform(0.2, 0.4)) + await emit_followups(parent_event, remaining_depth - 1) + + try: + producers = [asyncio.create_task(producer_task(idx)) for idx in range(max(1, args.concurrent))] + await asyncio.gather(*producers) + await bus.wait_until_idle() + finally: + await bus.stop() + + +def main() -> None: + args = parse_args() + asyncio.run(run_generator(args)) + + +if __name__ == '__main__': + main()